cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

fpu_helper.c (132667B)


      1/*
      2 *  PowerPC floating point and SPE emulation helpers for QEMU.
      3 *
      4 *  Copyright (c) 2003-2007 Jocelyn Mayer
      5 *
      6 * This library is free software; you can redistribute it and/or
      7 * modify it under the terms of the GNU Lesser General Public
      8 * License as published by the Free Software Foundation; either
      9 * version 2.1 of the License, or (at your option) any later version.
     10 *
     11 * This library is distributed in the hope that it will be useful,
     12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
     13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     14 * Lesser General Public License for more details.
     15 *
     16 * You should have received a copy of the GNU Lesser General Public
     17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
     18 */
     19#include "qemu/osdep.h"
     20#include "cpu.h"
     21#include "exec/helper-proto.h"
     22#include "exec/exec-all.h"
     23#include "internal.h"
     24#include "fpu/softfloat.h"
     25
     26static inline float128 float128_snan_to_qnan(float128 x)
     27{
     28    float128 r;
     29
     30    r.high = x.high | 0x0000800000000000;
     31    r.low = x.low;
     32    return r;
     33}
     34
     35#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
     36#define float32_snan_to_qnan(x) ((x) | 0x00400000)
     37#define float16_snan_to_qnan(x) ((x) | 0x0200)
     38
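       /*
        * MSR[FE0] and MSR[FE1] together select the floating-point exception
        * mode: when both are zero, floating-point exceptions are ignored;
        * any other combination enables some form of FP exception delivery.
        * In user-only emulation there is no MSR to consult, so exceptions
        * are treated as always enabled.
        */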
     39static inline bool fp_exceptions_enabled(CPUPPCState *env)
     40{
     41#ifdef CONFIG_USER_ONLY
     42    return true;
     43#else
     44    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
     45#endif
     46}
     47
     48/*****************************************************************************/
     49/* Floating point operations helpers */
     50
     51/*
     52 * This is the non-arithmetic conversion that happens e.g. on loads.
     53 * In the Power ISA pseudocode, this is called DOUBLE.
     54 */
     55uint64_t helper_todouble(uint32_t arg)
     56{
     57    uint32_t abs_arg = arg & 0x7fffffff;
     58    uint64_t ret;
     59
     60    if (likely(abs_arg >= 0x00800000)) {
     61        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
     62            /* Inf or NAN.  */
     63            ret  = (uint64_t)extract32(arg, 31, 1) << 63;
     64            ret |= (uint64_t)0x7ff << 52;
     65            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
     66        } else {
     67            /* Normalized operand.  */
     68            ret  = (uint64_t)extract32(arg, 30, 2) << 62;
     69            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
     70            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
     71        }
     72    } else {
     73        /* Zero or Denormalized operand.  */
     74        ret = (uint64_t)extract32(arg, 31, 1) << 63;
     75        if (unlikely(abs_arg != 0)) {
     76            /*
     77             * Denormalized operand.
     78             * Shift fraction so that the msb is in the implicit bit position.
     79             * Thus, shift is in the range [1:23].
     80             */
     81            int shift = clz32(abs_arg) - 8;
     82            /*
     83             * The first 3 terms compute the float64 exponent.  We then bias
     84             * this result by -1 so that we can swallow the implicit bit below.
     85             */
     86            int exp = -126 - shift + 1023 - 1;
     87
     88            ret |= (uint64_t)exp << 52;
     89            ret += (uint64_t)abs_arg << (52 - 23 + shift);
     90        }
     91    }
     92    return ret;
     93}
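       /*
        * Worked example: float32 1.0 is 0x3f800000 (sign 0, biased exponent
        * 0x7f, zero fraction).  The normalized path copies the sign and the
        * exponent MSB to bits 63:62, replicates the complement of that MSB
        * into bits 61:59 to re-bias the exponent from 127 to 1023, and
        * shifts the remaining bits left by 29, giving 0x3ff0000000000000,
        * i.e. float64 1.0.
        */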
     94
     95/*
     96 * This is the non-arithmetic conversion that happens e.g. on stores.
     97 * In the Power ISA pseudocode, this is called SINGLE.
     98 */
     99uint32_t helper_tosingle(uint64_t arg)
    100{
    101    int exp = extract64(arg, 52, 11);
    102    uint32_t ret;
    103
    104    if (likely(exp > 896)) {
    105        /* No denormalization required (includes Inf, NaN).  */
    106        ret  = extract64(arg, 62, 2) << 30;
    107        ret |= extract64(arg, 29, 30);
    108    } else {
    109        /*
    110         * Zero or Denormal result.  If the exponent is in bounds for
    111         * a single-precision denormal result, extract the proper
    112         * bits.  If the input is not zero, and the exponent is out of
    113         * bounds, then the result is undefined; this underflows to
    114         * zero.
    115         */
    116        ret = extract64(arg, 63, 1) << 31;
    117        if (unlikely(exp >= 874)) {
    118            /* Denormal result.  */
    119            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
    120        }
    121    }
    122    return ret;
    123}
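       /*
        * Worked example: 0x3ff0000000000000 (float64 1.0) has a biased
        * exponent of 1023 > 896, so the fast path applies: bits 63:62 move
        * to bits 31:30 and bits 58:29 move to bits 29:0, producing
        * 0x3f800000, i.e. float32 1.0.
        */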
    124
    125static inline int ppc_float32_get_unbiased_exp(float32 f)
    126{
    127    return ((f >> 23) & 0xFF) - 127;
    128}
    129
    130static inline int ppc_float64_get_unbiased_exp(float64 f)
    131{
    132    return ((f >> 52) & 0x7FF) - 1023;
    133}
    134
    135/* Classify a floating-point number.  */
    136enum {
    137    is_normal   = 1,
    138    is_zero     = 2,
    139    is_denormal = 4,
    140    is_inf      = 8,
    141    is_qnan     = 16,
    142    is_snan     = 32,
    143    is_neg      = 64,
    144};
    145
    146#define COMPUTE_CLASS(tp)                                      \
    147static int tp##_classify(tp arg)                               \
    148{                                                              \
    149    int ret = tp##_is_neg(arg) * is_neg;                       \
    150    if (unlikely(tp##_is_any_nan(arg))) {                      \
    151        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
    152        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
    153                ? is_snan : is_qnan);                          \
    154    } else if (unlikely(tp##_is_infinity(arg))) {              \
    155        ret |= is_inf;                                         \
    156    } else if (tp##_is_zero(arg)) {                            \
    157        ret |= is_zero;                                        \
    158    } else if (tp##_is_zero_or_denormal(arg)) {                \
    159        ret |= is_denormal;                                    \
    160    } else {                                                   \
    161        ret |= is_normal;                                      \
    162    }                                                          \
    163    return ret;                                                \
    164}
    165
    166COMPUTE_CLASS(float16)
    167COMPUTE_CLASS(float32)
    168COMPUTE_CLASS(float64)
    169COMPUTE_CLASS(float128)
    170
    171static void set_fprf_from_class(CPUPPCState *env, int class)
    172{
    173    static const uint8_t fprf[6][2] = {
    174        { 0x04, 0x08 },  /* normalized */
    175        { 0x02, 0x12 },  /* zero */
    176        { 0x14, 0x18 },  /* denormalized */
    177        { 0x05, 0x09 },  /* infinity */
    178        { 0x11, 0x11 },  /* qnan */
    179        { 0x00, 0x00 },  /* snan -- flags are undefined */
    180    };
    181    bool isneg = class & is_neg;
    182
    183    env->fpscr &= ~FP_FPRF;
    184    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
    185}
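       /*
        * Example: negative zero classifies as (is_zero | is_neg), so
        * ctz32() picks the "zero" row and the sign picks the second column,
        * giving 0x12, the FPRF encoding for negative zero.
        */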
    186
    187#define COMPUTE_FPRF(tp)                                \
    188void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
    189{                                                       \
    190    set_fprf_from_class(env, tp##_classify(arg));       \
    191}
    192
    193COMPUTE_FPRF(float16)
    194COMPUTE_FPRF(float32)
    195COMPUTE_FPRF(float64)
    196COMPUTE_FPRF(float128)
    197
    198/* Floating-point invalid operations exception */
    199static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
    200{
    201    /* Update the floating-point invalid operation summary */
    202    env->fpscr |= FP_VX;
    203    /* Update the floating-point exception summary */
    204    env->fpscr |= FP_FX;
    205    if (fpscr_ve != 0) {
    206        /* Update the floating-point enabled exception summary */
    207        env->fpscr |= FP_FEX;
    208        if (fp_exceptions_enabled(env)) {
    209            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
    210                                   POWERPC_EXCP_FP | op, retaddr);
    211        }
    212    }
    213}
    214
    215static void finish_invalid_op_arith(CPUPPCState *env, int op,
    216                                    bool set_fpcc, uintptr_t retaddr)
    217{
    218    env->fpscr &= ~(FP_FR | FP_FI);
    219    if (fpscr_ve == 0) {
    220        if (set_fpcc) {
    221            env->fpscr &= ~FP_FPCC;
    222            env->fpscr |= (FP_C | FP_FU);
    223        }
    224    }
    225    finish_invalid_op_excp(env, op, retaddr);
    226}
    227
    228/* Signalling NaN */
    229static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
    230{
    231    env->fpscr |= FP_VXSNAN;
    232    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
    233}
    234
    235/* Magnitude subtraction of infinities */
    236static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
    237                                   uintptr_t retaddr)
    238{
    239    env->fpscr |= FP_VXISI;
    240    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
    241}
    242
    243/* Division of infinity by infinity */
    244static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
    245                                   uintptr_t retaddr)
    246{
    247    env->fpscr |= FP_VXIDI;
    248    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
    249}
    250
    251/* Division of zero by zero */
    252static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
    253                                   uintptr_t retaddr)
    254{
    255    env->fpscr |= FP_VXZDZ;
    256    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
    257}
    258
    259/* Multiplication of zero by infinity */
    260static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
    261                                   uintptr_t retaddr)
    262{
    263    env->fpscr |= FP_VXIMZ;
    264    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
    265}
    266
    267/* Square root of a negative number */
    268static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
    269                                    uintptr_t retaddr)
    270{
    271    env->fpscr |= FP_VXSQRT;
    272    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
    273}
    274
    275/* Ordered comparison of NaN */
    276static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
    277                                  uintptr_t retaddr)
    278{
    279    env->fpscr |= FP_VXVC;
    280    if (set_fpcc) {
    281        env->fpscr &= ~FP_FPCC;
    282        env->fpscr |= (FP_C | FP_FU);
    283    }
    284    /* Update the floating-point invalid operation summary */
    285    env->fpscr |= FP_VX;
    286    /* Update the floating-point exception summary */
    287    env->fpscr |= FP_FX;
    288    /* We must update the target FPR before raising the exception */
    289    if (fpscr_ve != 0) {
    290        CPUState *cs = env_cpu(env);
    291
    292        cs->exception_index = POWERPC_EXCP_PROGRAM;
    293        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
    294        /* Update the floating-point enabled exception summary */
    295        env->fpscr |= FP_FEX;
    296        /* Exception is deferred */
    297    }
    298}
    299
    300/* Invalid conversion */
    301static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
    302                                   uintptr_t retaddr)
    303{
    304    env->fpscr |= FP_VXCVI;
    305    env->fpscr &= ~(FP_FR | FP_FI);
    306    if (fpscr_ve == 0) {
    307        if (set_fpcc) {
    308            env->fpscr &= ~FP_FPCC;
    309            env->fpscr |= (FP_C | FP_FU);
    310        }
    311    }
    312    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
    313}
    314
    315static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
    316{
    317    env->fpscr |= FP_ZX;
    318    env->fpscr &= ~(FP_FR | FP_FI);
    319    /* Update the floating-point exception summary */
    320    env->fpscr |= FP_FX;
    321    if (fpscr_ze != 0) {
    322        /* Update the floating-point enabled exception summary */
    323        env->fpscr |= FP_FEX;
    324        if (fp_exceptions_enabled(env)) {
    325            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
    326                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
    327                                   raddr);
    328        }
    329    }
    330}
    331
    332static inline void float_overflow_excp(CPUPPCState *env)
    333{
    334    CPUState *cs = env_cpu(env);
    335
    336    env->fpscr |= FP_OX;
    337    /* Update the floating-point exception summary */
    338    env->fpscr |= FP_FX;
    339    if (fpscr_oe != 0) {
    340        /* XXX: should adjust the result */
    341        /* Update the floating-point enabled exception summary */
    342        env->fpscr |= FP_FEX;
    343        /* We must update the target FPR before raising the exception */
    344        cs->exception_index = POWERPC_EXCP_PROGRAM;
    345        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    346    } else {
    347        env->fpscr |= FP_XX;
    348        env->fpscr |= FP_FI;
    349    }
    350}
    351
    352static inline void float_underflow_excp(CPUPPCState *env)
    353{
    354    CPUState *cs = env_cpu(env);
    355
    356    env->fpscr |= FP_UX;
    357    /* Update the floating-point exception summary */
    358    env->fpscr |= FP_FX;
    359    if (fpscr_ue != 0) {
    360        /* XXX: should adjust the result */
    361        /* Update the floating-point enabled exception summary */
    362        env->fpscr |= FP_FEX;
    363        /* We must update the target FPR before raising the exception */
    364        cs->exception_index = POWERPC_EXCP_PROGRAM;
    365        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    366    }
    367}
    368
    369static inline void float_inexact_excp(CPUPPCState *env)
    370{
    371    CPUState *cs = env_cpu(env);
    372
    373    env->fpscr |= FP_FI;
    374    env->fpscr |= FP_XX;
    375    /* Update the floating-point exception summary */
    376    env->fpscr |= FP_FX;
    377    if (fpscr_xe != 0) {
    378        /* Update the floating-point enabled exception summary */
    379        env->fpscr |= FP_FEX;
    380        /* We must update the target FPR before raising the exception */
    381        cs->exception_index = POWERPC_EXCP_PROGRAM;
    382        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    383    }
    384}
    385
    386void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
    387{
    388    uint32_t mask = 1u << bit;
    389    if (env->fpscr & mask) {
    390        ppc_store_fpscr(env, env->fpscr & ~(target_ulong)mask);
    391    }
    392}
    393
    394void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
    395{
    396    uint32_t mask = 1u << bit;
    397    if (!(env->fpscr & mask)) {
    398        ppc_store_fpscr(env, env->fpscr | mask);
    399    }
    400}
    401
    402void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles)
    403{
    404    target_ulong mask = 0;
    405    int i;
    406
    407    /* TODO: push this extension back to translation time */
    408    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
    409        if (nibbles & (1 << i)) {
    410            mask |= (target_ulong) 0xf << (4 * i);
    411        }
    412    }
    413    val = (val & mask) | (env->fpscr & ~mask);
    414    ppc_store_fpscr(env, val);
    415}
    416
    417static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
    418{
    419    CPUState *cs = env_cpu(env);
    420    int status = get_float_exception_flags(&env->fp_status);
    421
    422    if (status & float_flag_overflow) {
    423        float_overflow_excp(env);
    424    } else if (status & float_flag_underflow) {
    425        float_underflow_excp(env);
    426    }
    427    if (status & float_flag_inexact) {
    428        float_inexact_excp(env);
    429    } else {
    430        env->fpscr &= ~FP_FI; /* clear the FPSCR[FI] bit */
    431    }
    432
    433    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
    434        (env->error_code & POWERPC_EXCP_FP)) {
    435        /* Deferred floating-point exception after target FPR update */
    436        if (fp_exceptions_enabled(env)) {
    437            raise_exception_err_ra(env, cs->exception_index,
    438                                   env->error_code, raddr);
    439        }
    440    }
    441}
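       /*
        * The overflow/underflow/inexact helpers above only record a pending
        * program interrupt in exception_index/error_code; it is raised here,
        * after the target FPR has already been updated (the "deferred"
        * exception model used throughout this file).
        */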
    442
    443void helper_float_check_status(CPUPPCState *env)
    444{
    445    do_float_check_status(env, GETPC());
    446}
    447
    448void helper_reset_fpstatus(CPUPPCState *env)
    449{
    450    set_float_exception_flags(0, &env->fp_status);
    451}
    452
    453static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
    454                                    uintptr_t retaddr, int classes)
    455{
    456    if ((classes & ~is_neg) == is_inf) {
    457        /* Magnitude subtraction of infinities */
    458        float_invalid_op_vxisi(env, set_fpcc, retaddr);
    459    } else if (classes & is_snan) {
    460        float_invalid_op_vxsnan(env, retaddr);
    461    }
    462}
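       /*
        * Example: adding +inf and -inf yields classes is_inf and
        * (is_inf | is_neg); ORed together and with is_neg masked off this is
        * exactly is_inf, so the magnitude-subtraction-of-infinities case
        * (VXISI) is flagged.
        */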
    463
    464/* fadd - fadd. */
    465float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
    466{
    467    float64 ret = float64_add(arg1, arg2, &env->fp_status);
    468    int status = get_float_exception_flags(&env->fp_status);
    469
    470    if (unlikely(status & float_flag_invalid)) {
    471        float_invalid_op_addsub(env, 1, GETPC(),
    472                                float64_classify(arg1) |
    473                                float64_classify(arg2));
    474    }
    475
    476    return ret;
    477}
    478
    479/* fsub - fsub. */
    480float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
    481{
    482    float64 ret = float64_sub(arg1, arg2, &env->fp_status);
    483    int status = get_float_exception_flags(&env->fp_status);
    484
    485    if (unlikely(status & float_flag_invalid)) {
    486        float_invalid_op_addsub(env, 1, GETPC(),
    487                                float64_classify(arg1) |
    488                                float64_classify(arg2));
    489    }
    490
    491    return ret;
    492}
    493
    494static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
    495                                 uintptr_t retaddr, int classes)
    496{
    497    if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) {
    498        /* Multiplication of zero by infinity */
    499        float_invalid_op_vximz(env, set_fprc, retaddr);
    500    } else if (classes & is_snan) {
    501        float_invalid_op_vxsnan(env, retaddr);
    502    }
    503}
    504
    505/* fmul - fmul. */
    506float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
    507{
    508    float64 ret = float64_mul(arg1, arg2, &env->fp_status);
    509    int status = get_float_exception_flags(&env->fp_status);
    510
    511    if (unlikely(status & float_flag_invalid)) {
    512        float_invalid_op_mul(env, 1, GETPC(),
    513                             float64_classify(arg1) |
    514                             float64_classify(arg2));
    515    }
    516
    517    return ret;
    518}
    519
    520static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
    521                                 uintptr_t retaddr, int classes)
    522{
    523    classes &= ~is_neg;
    524    if (classes == is_inf) {
    525        /* Division of infinity by infinity */
    526        float_invalid_op_vxidi(env, set_fprc, retaddr);
    527    } else if (classes == is_zero) {
    528        /* Division of zero by zero */
    529        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    530    } else if (classes & is_snan) {
    531        float_invalid_op_vxsnan(env, retaddr);
    532    }
    533}
    534
    535/* fdiv - fdiv. */
    536float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
    537{
    538    float64 ret = float64_div(arg1, arg2, &env->fp_status);
    539    int status = get_float_exception_flags(&env->fp_status);
    540
    541    if (unlikely(status)) {
    542        if (status & float_flag_invalid) {
    543            float_invalid_op_div(env, 1, GETPC(),
    544                                 float64_classify(arg1) |
    545                                 float64_classify(arg2));
    546        }
    547        if (status & float_flag_divbyzero) {
    548            float_zero_divide_excp(env, GETPC());
    549        }
    550    }
    551
    552    return ret;
    553}
    554
    555static void float_invalid_cvt(CPUPPCState *env, bool set_fprc,
    556                              uintptr_t retaddr, int class1)
    557{
    558    float_invalid_op_vxcvi(env, set_fprc, retaddr);
    559    if (class1 & is_snan) {
    560        float_invalid_op_vxsnan(env, retaddr);
    561    }
    562}
    563
    564#define FPU_FCTI(op, cvt, nanval)                                      \
    565uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
    566{                                                                      \
    567    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    568    int status = get_float_exception_flags(&env->fp_status);           \
    569                                                                       \
    570    if (unlikely(status)) {                                            \
    571        if (status & float_flag_invalid) {                             \
    572            float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \
    573            ret = nanval;                                              \
    574        }                                                              \
    575        do_float_check_status(env, GETPC());                           \
    576    }                                                                  \
    577    return ret;                                                        \
    578}
    579
    580FPU_FCTI(fctiw, int32, 0x80000000U)
    581FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
    582FPU_FCTI(fctiwu, uint32, 0x00000000U)
    583FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
    584FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
    585FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
    586FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
    587FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
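       /*
        * nanval is the result substituted whenever the conversion raises the
        * invalid exception (e.g. a NaN source): the signed variants return
        * the minimum integer, the unsigned variants return 0.
        */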
    588
    589#define FPU_FCFI(op, cvtr, is_single)                      \
    590uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
    591{                                                          \
    592    CPU_DoubleU farg;                                      \
    593                                                           \
    594    if (is_single) {                                       \
    595        float32 tmp = cvtr(arg, &env->fp_status);          \
    596        farg.d = float32_to_float64(tmp, &env->fp_status); \
    597    } else {                                               \
    598        farg.d = cvtr(arg, &env->fp_status);               \
    599    }                                                      \
    600    do_float_check_status(env, GETPC());                   \
    601    return farg.ll;                                        \
    602}
    603
    604FPU_FCFI(fcfid, int64_to_float64, 0)
    605FPU_FCFI(fcfids, int64_to_float32, 1)
    606FPU_FCFI(fcfidu, uint64_to_float64, 0)
    607FPU_FCFI(fcfidus, uint64_to_float32, 1)
    608
    609static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
    610                              int rounding_mode)
    611{
    612    CPU_DoubleU farg;
    613    FloatRoundMode old_rounding_mode = get_float_rounding_mode(&env->fp_status);
    614
    615    farg.ll = arg;
    616
    617    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
    618        /* sNaN round */
    619        float_invalid_op_vxsnan(env, GETPC());
    620        farg.ll = arg | 0x0008000000000000ULL;
    621    } else {
    622        int inexact = get_float_exception_flags(&env->fp_status) &
    623                      float_flag_inexact;
    624        set_float_rounding_mode(rounding_mode, &env->fp_status);
    625        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
    626        set_float_rounding_mode(old_rounding_mode, &env->fp_status);
    627
    628        /* fri* does not set FPSCR[XX] */
    629        if (!inexact) {
    630            env->fp_status.float_exception_flags &= ~float_flag_inexact;
    631        }
    632    }
    633    do_float_check_status(env, GETPC());
    634    return farg.ll;
    635}
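       /*
        * Note that the inexact flag is kept only if it was already pending
        * before the rounding, so an inexact fri* result does not itself set
        * FPSCR[XX].
        */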
    636
    637uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
    638{
    639    return do_fri(env, arg, float_round_ties_away);
    640}
    641
    642uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
    643{
    644    return do_fri(env, arg, float_round_to_zero);
    645}
    646
    647uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
    648{
    649    return do_fri(env, arg, float_round_up);
    650}
    651
    652uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
    653{
    654    return do_fri(env, arg, float_round_down);
    655}
    656
    657#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
    658static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
    659                 unsigned int madd_flags, uintptr_t retaddr)            \
    660{                                                                       \
    661    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
    662        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
    663        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
    664        /* sNaN operation */                                            \
    665        float_invalid_op_vxsnan(env, retaddr);                          \
    666    }                                                                   \
    667    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
    668        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
    669        /* Multiplication of zero by infinity */                        \
    670        float_invalid_op_vximz(env, 1, retaddr);                        \
    671    }                                                                   \
    672    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
    673        TP##_is_infinity(arg3)) {                                       \
    674        uint8_t aSign, bSign, cSign;                                    \
    675                                                                        \
    676        aSign = TP##_is_neg(arg1);                                      \
    677        bSign = TP##_is_neg(arg2);                                      \
    678        cSign = TP##_is_neg(arg3);                                      \
    679        if (madd_flags & float_muladd_negate_c) {                       \
    680            cSign ^= 1;                                                 \
    681        }                                                               \
    682        if (aSign ^ bSign ^ cSign) {                                    \
    683            float_invalid_op_vxisi(env, 1, retaddr);                    \
    684        }                                                               \
    685    }                                                                   \
    686}
    687FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
    688FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
    689
    690#define FPU_FMADD(op, madd_flags)                                       \
    691uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
    692                     uint64_t arg2, uint64_t arg3)                      \
    693{                                                                       \
    694    uint32_t flags;                                                     \
    695    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
    696                                 &env->fp_status);                      \
    697    flags = get_float_exception_flags(&env->fp_status);                 \
    698    if (flags) {                                                        \
    699        if (flags & float_flag_invalid) {                               \
    700            float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
    701                                        madd_flags, GETPC());           \
    702        }                                                               \
    703        do_float_check_status(env, GETPC());                            \
    704    }                                                                   \
    705    return ret;                                                         \
    706}
    707
    708#define MADD_FLGS 0
    709#define MSUB_FLGS float_muladd_negate_c
    710#define NMADD_FLGS float_muladd_negate_result
    711#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
    712
    713FPU_FMADD(fmadd, MADD_FLGS)
    714FPU_FMADD(fnmadd, NMADD_FLGS)
    715FPU_FMADD(fmsub, MSUB_FLGS)
    716FPU_FMADD(fnmsub, NMSUB_FLGS)
    717
    718/* frsp - frsp. */
    719uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
    720{
    721    CPU_DoubleU farg;
    722    float32 f32;
    723
    724    farg.ll = arg;
    725
    726    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
    727        float_invalid_op_vxsnan(env, GETPC());
    728    }
    729    f32 = float64_to_float32(farg.d, &env->fp_status);
    730    farg.d = float32_to_float64(f32, &env->fp_status);
    731
    732    return farg.ll;
    733}
    734
    735/* fsqrt - fsqrt. */
    736float64 helper_fsqrt(CPUPPCState *env, float64 arg)
    737{
    738    float64 ret = float64_sqrt(arg, &env->fp_status);
    739    int status = get_float_exception_flags(&env->fp_status);
    740
    741    if (unlikely(status & float_flag_invalid)) {
    742        if (unlikely(float64_is_any_nan(arg))) {
    743            if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
    744                /* sNaN square root */
    745                float_invalid_op_vxsnan(env, GETPC());
    746            }
    747        } else {
    748            /* Square root of a negative nonzero number */
    749            float_invalid_op_vxsqrt(env, 1, GETPC());
    750        }
    751    }
    752
    753    return ret;
    754}
    755
    756/* fre - fre. */
    757float64 helper_fre(CPUPPCState *env, float64 arg)
    758{
    759    /* "Estimate" the reciprocal with actual division.  */
    760    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    761    int status = get_float_exception_flags(&env->fp_status);
    762
    763    if (unlikely(status)) {
    764        if (status & float_flag_invalid) {
    765            if (float64_is_signaling_nan(arg, &env->fp_status)) {
    766                /* sNaN reciprocal */
    767                float_invalid_op_vxsnan(env, GETPC());
    768            }
    769        }
    770        if (status & float_flag_divbyzero) {
    771            float_zero_divide_excp(env, GETPC());
    772            /* For FPSCR.ZE == 0, the result is 1/2.  */
    773            ret = float64_set_sign(float64_half, float64_is_neg(arg));
    774        }
    775    }
    776
    777    return ret;
    778}
    779
    780/* fres - fres. */
    781uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
    782{
    783    CPU_DoubleU farg;
    784    float32 f32;
    785
    786    farg.ll = arg;
    787
    788    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
    789        /* sNaN reciprocal */
    790        float_invalid_op_vxsnan(env, GETPC());
    791    }
    792    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    793    f32 = float64_to_float32(farg.d, &env->fp_status);
    794    farg.d = float32_to_float64(f32, &env->fp_status);
    795
    796    return farg.ll;
    797}
    798
    799/* frsqrte - frsqrte. */
    800float64 helper_frsqrte(CPUPPCState *env, float64 arg)
    801{
    802    /* "Estimate" the reciprocal square root with an actual sqrt and division.  */
    803    float64 rets = float64_sqrt(arg, &env->fp_status);
    804    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    805    int status = get_float_exception_flags(&env->fp_status);
    806
    807    if (unlikely(status)) {
    808        if (status & float_flag_invalid) {
    809            if (float64_is_signaling_nan(arg, &env->fp_status)) {
    810                /* sNaN reciprocal */
    811                float_invalid_op_vxsnan(env, GETPC());
    812            } else {
    813                /* Square root of a negative nonzero number */
    814                float_invalid_op_vxsqrt(env, 1, GETPC());
    815            }
    816        }
    817        if (status & float_flag_divbyzero) {
    818            /* Reciprocal of (square root of) zero.  */
    819            float_zero_divide_excp(env, GETPC());
    820        }
    821    }
    822
    823    return retd;
    824}
    825
    826/* fsel - fsel. */
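       /*
        * fsel returns arg2 when arg1 is greater than or equal to zero
        * (either sign of zero qualifies) and is not a NaN, and arg3
        * otherwise.
        */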
    827uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
    828                     uint64_t arg3)
    829{
    830    CPU_DoubleU farg1;
    831
    832    farg1.ll = arg1;
    833
    834    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
    835        !float64_is_any_nan(farg1.d)) {
    836        return arg2;
    837    } else {
    838        return arg3;
    839    }
    840}
    841
    842uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
    843{
    844    int fe_flag = 0;
    845    int fg_flag = 0;
    846
    847    if (unlikely(float64_is_infinity(fra) ||
    848                 float64_is_infinity(frb) ||
    849                 float64_is_zero(frb))) {
    850        fe_flag = 1;
    851        fg_flag = 1;
    852    } else {
    853        int e_a = ppc_float64_get_unbiased_exp(fra);
    854        int e_b = ppc_float64_get_unbiased_exp(frb);
    855
    856        if (unlikely(float64_is_any_nan(fra) ||
    857                     float64_is_any_nan(frb))) {
    858            fe_flag = 1;
    859        } else if ((e_b <= -1022) || (e_b >= 1021)) {
    860            fe_flag = 1;
    861        } else if (!float64_is_zero(fra) &&
    862                   (((e_a - e_b) >= 1023) ||
    863                    ((e_a - e_b) <= -1021) ||
    864                    (e_a <= -970))) {
    865            fe_flag = 1;
    866        }
    867
    868        if (unlikely(float64_is_zero_or_denormal(frb))) {
    869            /* XB is not zero because of the above check and */
    870            /* so must be denormalized.                      */
    871            fg_flag = 1;
    872        }
    873    }
    874
    875    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
    876}
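       /*
        * Both helper_ftdiv above and helper_ftsqrt below return a 4-bit CR
        * field image: bit 3 (0x8) is always set, bit 2 reflects fg_flag,
        * bit 1 reflects fe_flag, and bit 0 is always clear.
        */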
    877
    878uint32_t helper_ftsqrt(uint64_t frb)
    879{
    880    int fe_flag = 0;
    881    int fg_flag = 0;
    882
    883    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
    884        fe_flag = 1;
    885        fg_flag = 1;
    886    } else {
    887        int e_b = ppc_float64_get_unbiased_exp(frb);
    888
    889        if (unlikely(float64_is_any_nan(frb))) {
    890            fe_flag = 1;
    891        } else if (unlikely(float64_is_zero(frb))) {
    892            fe_flag = 1;
    893        } else if (unlikely(float64_is_neg(frb))) {
    894            fe_flag = 1;
    895        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
    896            fe_flag = 1;
    897        }
    898
    899        if (unlikely(float64_is_zero_or_denormal(frb))) {
    900            /* XB is not zero because of the above check and */
    901            /* therefore must be denormalized.               */
    902            fg_flag = 1;
    903        }
    904    }
    905
    906    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
    907}
    908
    909void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
    910                  uint32_t crfD)
    911{
    912    CPU_DoubleU farg1, farg2;
    913    uint32_t ret = 0;
    914
    915    farg1.ll = arg1;
    916    farg2.ll = arg2;
    917
    918    if (unlikely(float64_is_any_nan(farg1.d) ||
    919                 float64_is_any_nan(farg2.d))) {
    920        ret = 0x01UL;
    921    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
    922        ret = 0x08UL;
    923    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
    924        ret = 0x04UL;
    925    } else {
    926        ret = 0x02UL;
    927    }
    928
    929    env->fpscr &= ~FP_FPCC;
    930    env->fpscr |= ret << FPSCR_FPCC;
    931    env->crf[crfD] = ret;
    932    if (unlikely(ret == 0x01UL
    933                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
    934                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
    935        /* sNaN comparison */
    936        float_invalid_op_vxsnan(env, GETPC());
    937    }
    938}
    939
    940void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
    941                  uint32_t crfD)
    942{
    943    CPU_DoubleU farg1, farg2;
    944    uint32_t ret = 0;
    945
    946    farg1.ll = arg1;
    947    farg2.ll = arg2;
    948
    949    if (unlikely(float64_is_any_nan(farg1.d) ||
    950                 float64_is_any_nan(farg2.d))) {
    951        ret = 0x01UL;
    952    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
    953        ret = 0x08UL;
    954    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
    955        ret = 0x04UL;
    956    } else {
    957        ret = 0x02UL;
    958    }
    959
    960    env->fpscr &= ~FP_FPCC;
    961    env->fpscr |= ret << FPSCR_FPCC;
    962    env->crf[crfD] = (uint32_t) ret;
    963    if (unlikely(ret == 0x01UL)) {
    964        float_invalid_op_vxvc(env, 1, GETPC());
    965        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
    966            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
    967            /* sNaN comparison */
    968            float_invalid_op_vxsnan(env, GETPC());
    969        }
    970    }
    971}
    972
    973/* Single-precision floating-point conversions */
    974static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
    975{
    976    CPU_FloatU u;
    977
    978    u.f = int32_to_float32(val, &env->vec_status);
    979
    980    return u.l;
    981}
    982
    983static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
    984{
    985    CPU_FloatU u;
    986
    987    u.f = uint32_to_float32(val, &env->vec_status);
    988
    989    return u.l;
    990}
    991
    992static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
    993{
    994    CPU_FloatU u;
    995
    996    u.l = val;
    997    /* NaNs are not treated the same way IEEE 754 does */
    998    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
    999        return 0;
   1000    }
   1001
   1002    return float32_to_int32(u.f, &env->vec_status);
   1003}
   1004
   1005static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
   1006{
   1007    CPU_FloatU u;
   1008
   1009    u.l = val;
   1010    /* NaNs are not treated the same way IEEE 754 does */
   1011    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
   1012        return 0;
   1013    }
   1014
   1015    return float32_to_uint32(u.f, &env->vec_status);
   1016}
   1017
   1018static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
   1019{
   1020    CPU_FloatU u;
   1021
   1022    u.l = val;
   1023    /* NaNs are not treated the same way IEEE 754 does */
   1024    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
   1025        return 0;
   1026    }
   1027
   1028    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
   1029}
   1030
   1031static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
   1032{
   1033    CPU_FloatU u;
   1034
   1035    u.l = val;
   1036    /* NaNs are not treated the same way IEEE 754 does */
   1037    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
   1038        return 0;
   1039    }
   1040
   1041    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
   1042}
   1043
   1044static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
   1045{
   1046    CPU_FloatU u;
   1047    float32 tmp;
   1048
   1049    u.f = int32_to_float32(val, &env->vec_status);
   1050    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
   1051    u.f = float32_div(u.f, tmp, &env->vec_status);
   1052
   1053    return u.l;
   1054}
   1055
   1056static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
   1057{
   1058    CPU_FloatU u;
   1059    float32 tmp;
   1060
   1061    u.f = uint32_to_float32(val, &env->vec_status);
   1062    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
   1063    u.f = float32_div(u.f, tmp, &env->vec_status);
   1064
   1065    return u.l;
   1066}
   1067
   1068static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
   1069{
   1070    CPU_FloatU u;
   1071    float32 tmp;
   1072
   1073    u.l = val;
   1074    /* NaNs are not treated the same way IEEE 754 does */
   1075    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
   1076        return 0;
   1077    }
   1078    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
   1079    u.f = float32_mul(u.f, tmp, &env->vec_status);
   1080
   1081    return float32_to_int32(u.f, &env->vec_status);
   1082}
   1083
   1084static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
   1085{
   1086    CPU_FloatU u;
   1087    float32 tmp;
   1088
   1089    u.l = val;
   1090    /* NaNs are not treated the same way IEEE 754 does */
   1091    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
   1092        return 0;
   1093    }
   1094    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
   1095    u.f = float32_mul(u.f, tmp, &env->vec_status);
   1096
   1097    return float32_to_uint32(u.f, &env->vec_status);
   1098}
   1099
   1100#define HELPER_SPE_SINGLE_CONV(name)                              \
   1101    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
   1102    {                                                             \
   1103        return e##name(env, val);                                 \
   1104    }
   1105/* efscfsi */
   1106HELPER_SPE_SINGLE_CONV(fscfsi);
   1107/* efscfui */
   1108HELPER_SPE_SINGLE_CONV(fscfui);
   1109/* efscfuf */
   1110HELPER_SPE_SINGLE_CONV(fscfuf);
   1111/* efscfsf */
   1112HELPER_SPE_SINGLE_CONV(fscfsf);
   1113/* efsctsi */
   1114HELPER_SPE_SINGLE_CONV(fsctsi);
   1115/* efsctui */
   1116HELPER_SPE_SINGLE_CONV(fsctui);
   1117/* efsctsiz */
   1118HELPER_SPE_SINGLE_CONV(fsctsiz);
   1119/* efsctuiz */
   1120HELPER_SPE_SINGLE_CONV(fsctuiz);
   1121/* efsctsf */
   1122HELPER_SPE_SINGLE_CONV(fsctsf);
   1123/* efsctuf */
   1124HELPER_SPE_SINGLE_CONV(fsctuf);
   1125
   1126#define HELPER_SPE_VECTOR_CONV(name)                            \
   1127    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
   1128    {                                                           \
   1129        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
   1130            (uint64_t)e##name(env, val);                        \
   1131    }
   1132/* evfscfsi */
   1133HELPER_SPE_VECTOR_CONV(fscfsi);
   1134/* evfscfui */
   1135HELPER_SPE_VECTOR_CONV(fscfui);
   1136/* evfscfuf */
   1137HELPER_SPE_VECTOR_CONV(fscfuf);
   1138/* evfscfsf */
   1139HELPER_SPE_VECTOR_CONV(fscfsf);
   1140/* evfsctsi */
   1141HELPER_SPE_VECTOR_CONV(fsctsi);
   1142/* evfsctui */
   1143HELPER_SPE_VECTOR_CONV(fsctui);
   1144/* evfsctsiz */
   1145HELPER_SPE_VECTOR_CONV(fsctsiz);
   1146/* evfsctuiz */
   1147HELPER_SPE_VECTOR_CONV(fsctuiz);
   1148/* evfsctsf */
   1149HELPER_SPE_VECTOR_CONV(fsctsf);
   1150/* evfsctuf */
   1151HELPER_SPE_VECTOR_CONV(fsctuf);
   1152
   1153/* Single-precision floating-point arithmetic */
   1154static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
   1155{
   1156    CPU_FloatU u1, u2;
   1157
   1158    u1.l = op1;
   1159    u2.l = op2;
   1160    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
   1161    return u1.l;
   1162}
   1163
   1164static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
   1165{
   1166    CPU_FloatU u1, u2;
   1167
   1168    u1.l = op1;
   1169    u2.l = op2;
   1170    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
   1171    return u1.l;
   1172}
   1173
   1174static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
   1175{
   1176    CPU_FloatU u1, u2;
   1177
   1178    u1.l = op1;
   1179    u2.l = op2;
   1180    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
   1181    return u1.l;
   1182}
   1183
   1184static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
   1185{
   1186    CPU_FloatU u1, u2;
   1187
   1188    u1.l = op1;
   1189    u2.l = op2;
   1190    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
   1191    return u1.l;
   1192}
   1193
   1194#define HELPER_SPE_SINGLE_ARITH(name)                                   \
   1195    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
   1196    {                                                                   \
   1197        return e##name(env, op1, op2);                                  \
   1198    }
   1199/* efsadd */
   1200HELPER_SPE_SINGLE_ARITH(fsadd);
   1201/* efssub */
   1202HELPER_SPE_SINGLE_ARITH(fssub);
   1203/* efsmul */
   1204HELPER_SPE_SINGLE_ARITH(fsmul);
   1205/* efsdiv */
   1206HELPER_SPE_SINGLE_ARITH(fsdiv);
   1207
   1208#define HELPER_SPE_VECTOR_ARITH(name)                                   \
   1209    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
   1210    {                                                                   \
   1211        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
   1212            (uint64_t)e##name(env, op1, op2);                           \
   1213    }
   1214/* evfsadd */
   1215HELPER_SPE_VECTOR_ARITH(fsadd);
   1216/* evfssub */
   1217HELPER_SPE_VECTOR_ARITH(fssub);
   1218/* evfsmul */
   1219HELPER_SPE_VECTOR_ARITH(fsmul);
   1220/* evfsdiv */
   1221HELPER_SPE_VECTOR_ARITH(fsdiv);
   1222
   1223/* Single-precision floating-point comparisons */
   1224static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
   1225{
   1226    CPU_FloatU u1, u2;
   1227
   1228    u1.l = op1;
   1229    u2.l = op2;
   1230    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
   1231}
   1232
   1233static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
   1234{
   1235    CPU_FloatU u1, u2;
   1236
   1237    u1.l = op1;
   1238    u2.l = op2;
   1239    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
   1240}
   1241
   1242static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
   1243{
   1244    CPU_FloatU u1, u2;
   1245
   1246    u1.l = op1;
   1247    u2.l = op2;
   1248    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
   1249}
   1250
   1251static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
   1252{
   1253    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
   1254    return efscmplt(env, op1, op2);
   1255}
   1256
   1257static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
   1258{
   1259    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
   1260    return efscmpgt(env, op1, op2);
   1261}
   1262
   1263static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
   1264{
   1265    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
   1266    return efscmpeq(env, op1, op2);
   1267}
   1268
   1269#define HELPER_SINGLE_SPE_CMP(name)                                     \
   1270    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
   1271    {                                                                   \
   1272        return e##name(env, op1, op2);                                  \
   1273    }
   1274/* efststlt */
   1275HELPER_SINGLE_SPE_CMP(fststlt);
   1276/* efststgt */
   1277HELPER_SINGLE_SPE_CMP(fststgt);
   1278/* efststeq */
   1279HELPER_SINGLE_SPE_CMP(fststeq);
   1280/* efscmplt */
   1281HELPER_SINGLE_SPE_CMP(fscmplt);
   1282/* efscmpgt */
   1283HELPER_SINGLE_SPE_CMP(fscmpgt);
   1284/* efscmpeq */
   1285HELPER_SINGLE_SPE_CMP(fscmpeq);
   1286
   1287static inline uint32_t evcmp_merge(int t0, int t1)
   1288{
   1289    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
   1290}
   1291
   1292#define HELPER_VECTOR_SPE_CMP(name)                                     \
   1293    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
   1294    {                                                                   \
   1295        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
   1296                           e##name(env, op1, op2));                     \
   1297    }
   1298/* evfststlt */
   1299HELPER_VECTOR_SPE_CMP(fststlt);
   1300/* evfststgt */
   1301HELPER_VECTOR_SPE_CMP(fststgt);
   1302/* evfststeq */
   1303HELPER_VECTOR_SPE_CMP(fststeq);
   1304/* evfscmplt */
   1305HELPER_VECTOR_SPE_CMP(fscmplt);
   1306/* evfscmpgt */
   1307HELPER_VECTOR_SPE_CMP(fscmpgt);
   1308/* evfscmpeq */
   1309HELPER_VECTOR_SPE_CMP(fscmpeq);
   1310
   1311/* Double-precision floating-point conversion */
   1312uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
   1313{
   1314    CPU_DoubleU u;
   1315
   1316    u.d = int32_to_float64(val, &env->vec_status);
   1317
   1318    return u.ll;
   1319}
   1320
   1321uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
   1322{
   1323    CPU_DoubleU u;
   1324
   1325    u.d = int64_to_float64(val, &env->vec_status);
   1326
   1327    return u.ll;
   1328}
   1329
   1330uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
   1331{
   1332    CPU_DoubleU u;
   1333
   1334    u.d = uint32_to_float64(val, &env->vec_status);
   1335
   1336    return u.ll;
   1337}
   1338
   1339uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
   1340{
   1341    CPU_DoubleU u;
   1342
   1343    u.d = uint64_to_float64(val, &env->vec_status);
   1344
   1345    return u.ll;
   1346}
   1347
   1348uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
   1349{
   1350    CPU_DoubleU u;
   1351
   1352    u.ll = val;
   1353    /* NaNs are not treated the same way IEEE 754 does */
   1354    if (unlikely(float64_is_any_nan(u.d))) {
   1355        return 0;
   1356    }
   1357
   1358    return float64_to_int32(u.d, &env->vec_status);
   1359}
   1360
   1361uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
   1362{
   1363    CPU_DoubleU u;
   1364
   1365    u.ll = val;
   1366    /* NaNs are not treated the same way IEEE 754 does */
   1367    if (unlikely(float64_is_any_nan(u.d))) {
   1368        return 0;
   1369    }
   1370
   1371    return float64_to_uint32(u.d, &env->vec_status);
   1372}
   1373
   1374uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
   1375{
   1376    CPU_DoubleU u;
   1377
   1378    u.ll = val;
   1379    /* NaNs are not treated the same way IEEE 754 does */
   1380    if (unlikely(float64_is_any_nan(u.d))) {
   1381        return 0;
   1382    }
   1383
   1384    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
   1385}
   1386
   1387uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
   1388{
   1389    CPU_DoubleU u;
   1390
   1391    u.ll = val;
   1392    /* NaNs are not treated the same way IEEE 754 does */
   1393    if (unlikely(float64_is_any_nan(u.d))) {
   1394        return 0;
   1395    }
   1396
   1397    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
   1398}
   1399
   1400uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
   1401{
   1402    CPU_DoubleU u;
   1403
   1404    u.ll = val;
   1405    /* NaNs are not treated the same way IEEE 754 does */
   1406    if (unlikely(float64_is_any_nan(u.d))) {
   1407        return 0;
   1408    }
   1409
   1410    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
   1411}
   1412
   1413uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
   1414{
   1415    CPU_DoubleU u;
   1416
   1417    u.ll = val;
   1418    /* NaNs are not treated the same way IEEE 754 does */
   1419    if (unlikely(float64_is_any_nan(u.d))) {
   1420        return 0;
   1421    }
   1422
   1423    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
   1424}
   1425
   1426uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
   1427{
   1428    CPU_DoubleU u;
   1429    float64 tmp;
   1430
   1431    u.d = int32_to_float64(val, &env->vec_status);
   1432    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
   1433    u.d = float64_div(u.d, tmp, &env->vec_status);
   1434
   1435    return u.ll;
   1436}
   1437
   1438uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
   1439{
   1440    CPU_DoubleU u;
   1441    float64 tmp;
   1442
   1443    u.d = uint32_to_float64(val, &env->vec_status);
   1444    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
   1445    u.d = float64_div(u.d, tmp, &env->vec_status);
   1446
   1447    return u.ll;
   1448}
   1449
   1450uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
   1451{
   1452    CPU_DoubleU u;
   1453    float64 tmp;
   1454
   1455    u.ll = val;
   1456    /* NaNs are not treated the same way IEEE 754 does */
   1457    if (unlikely(float64_is_any_nan(u.d))) {
   1458        return 0;
   1459    }
   1460    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
   1461    u.d = float64_mul(u.d, tmp, &env->vec_status);
   1462
   1463    return float64_to_int32(u.d, &env->vec_status);
   1464}
   1465
   1466uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
   1467{
   1468    CPU_DoubleU u;
   1469    float64 tmp;
   1470
   1471    u.ll = val;
   1472    /* NaNs are not treated the same way IEEE 754 does */
   1473    if (unlikely(float64_is_any_nan(u.d))) {
   1474        return 0;
   1475    }
   1476    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
   1477    u.d = float64_mul(u.d, tmp, &env->vec_status);
   1478
   1479    return float64_to_uint32(u.d, &env->vec_status);
   1480}
   1481
   1482uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
   1483{
   1484    CPU_DoubleU u1;
   1485    CPU_FloatU u2;
   1486
   1487    u1.ll = val;
   1488    u2.f = float64_to_float32(u1.d, &env->vec_status);
   1489
   1490    return u2.l;
   1491}
   1492
   1493uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
   1494{
   1495    CPU_DoubleU u2;
   1496    CPU_FloatU u1;
   1497
   1498    u1.l = val;
   1499    u2.d = float32_to_float64(u1.f, &env->vec_status);
   1500
   1501    return u2.ll;
   1502}
   1503
   1504/* Double-precision floating-point arithmetic */
   1505uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
   1506{
   1507    CPU_DoubleU u1, u2;
   1508
   1509    u1.ll = op1;
   1510    u2.ll = op2;
   1511    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
   1512    return u1.ll;
   1513}
   1514
   1515uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
   1516{
   1517    CPU_DoubleU u1, u2;
   1518
   1519    u1.ll = op1;
   1520    u2.ll = op2;
   1521    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
   1522    return u1.ll;
   1523}
   1524
   1525uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
   1526{
   1527    CPU_DoubleU u1, u2;
   1528
   1529    u1.ll = op1;
   1530    u2.ll = op2;
   1531    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
   1532    return u1.ll;
   1533}
   1534
   1535uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
   1536{
   1537    CPU_DoubleU u1, u2;
   1538
   1539    u1.ll = op1;
   1540    u2.ll = op2;
   1541    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
   1542    return u1.ll;
   1543}
   1544
   1545/* Double precision floating point helpers */
   1546uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
   1547{
   1548    CPU_DoubleU u1, u2;
   1549
   1550    u1.ll = op1;
   1551    u2.ll = op2;
   1552    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
   1553}
   1554
   1555uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
   1556{
   1557    CPU_DoubleU u1, u2;
   1558
   1559    u1.ll = op1;
   1560    u2.ll = op2;
   1561    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
   1562}
   1563
   1564uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
   1565{
   1566    CPU_DoubleU u1, u2;
   1567
   1568    u1.ll = op1;
   1569    u2.ll = op2;
   1570    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
   1571}
   1572
   1573uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
   1574{
   1575    /* XXX: TODO: test special values (NaN, infinities, ...) */
   1576    return helper_efdtstlt(env, op1, op2);
   1577}
   1578
   1579uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
   1580{
   1581    /* XXX: TODO: test special values (NaN, infinities, ...) */
   1582    return helper_efdtstgt(env, op1, op2);
   1583}
   1584
   1585uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
   1586{
   1587    /* XXX: TODO: test special values (NaN, infinities, ...) */
   1588    return helper_efdtsteq(env, op1, op2);
   1589}
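/*
 * A note derived from the code above rather than from the ISA text:
 * the tst* helpers return 4 or 0 (a single CR-field bit value,
 * presumably placed by the translator), "less than" and "equal" use
 * the softfloat predicates directly, and "greater than" is written as
 * the negation of float64_le, so unordered (NaN) operands currently
 * come out as "greater"; the cmp* entry points just reuse the tst*
 * helpers, as the XXX comments indicate.
 */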
   1590
   1591#define float64_to_float64(x, env) x
   1592
   1593
   1594/*
   1595 * VSX_ADD_SUB - VSX floating point add/subtract
   1596 *   name  - instruction mnemonic
   1597 *   op    - operation (add or sub)
   1598 *   nels  - number of elements (1, 2 or 4)
   1599 *   tp    - type (float32 or float64)
   1600 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   1601 *   sfprf - set FPRF
   1602 */
   1603#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
   1604void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
   1605                   ppc_vsr_t *xa, ppc_vsr_t *xb)                             \
   1606{                                                                            \
   1607    ppc_vsr_t t = *xt;                                                       \
   1608    int i;                                                                   \
   1609                                                                             \
   1610    helper_reset_fpstatus(env);                                              \
   1611                                                                             \
   1612    for (i = 0; i < nels; i++) {                                             \
   1613        float_status tstat = env->fp_status;                                 \
   1614        set_float_exception_flags(0, &tstat);                                \
   1615        t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                         \
   1616        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
   1617                                                                             \
   1618        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
   1619            float_invalid_op_addsub(env, sfprf, GETPC(),                     \
   1620                                    tp##_classify(xa->fld) |                 \
   1621                                    tp##_classify(xb->fld));                 \
   1622        }                                                                    \
   1623                                                                             \
   1624        if (r2sp) {                                                          \
   1625            t.fld = helper_frsp(env, t.fld);                                 \
   1626        }                                                                    \
   1627                                                                             \
   1628        if (sfprf) {                                                         \
   1629            helper_compute_fprf_float64(env, t.fld);                         \
   1630        }                                                                    \
   1631    }                                                                        \
   1632    *xt = t;                                                                 \
   1633    do_float_check_status(env, GETPC());                                     \
   1634}
   1635
   1636VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
   1637VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
   1638VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
   1639VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
   1640VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
   1641VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
   1642VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
   1643VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
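/*
 * For readers new to these macros: each VSX_* macro (VSX_ADD_SUB above,
 * VSX_MUL, VSX_DIV, ... below) stamps out one helper per instantiation
 * line.  As a rough sketch, VSX_ADD_SUB(xsadddp, add, 1, float64,
 * VsrD(0), 1, 0) expands to approximately:
 *
 *   void helper_xsadddp(CPUPPCState *env, ppc_vsr_t *xt,
 *                       ppc_vsr_t *xa, ppc_vsr_t *xb)
 *   {
 *       ppc_vsr_t t = *xt;
 *       float_status tstat;
 *
 *       helper_reset_fpstatus(env);
 *       tstat = env->fp_status;
 *       set_float_exception_flags(0, &tstat);
 *       t.VsrD(0) = float64_add(xa->VsrD(0), xb->VsrD(0), &tstat);
 *       env->fp_status.float_exception_flags |= tstat.float_exception_flags;
 *       ...
 *       *xt = t;
 *       do_float_check_status(env, GETPC());
 *   }
 *
 * The scratch float_status lets each element's exception flags be
 * inspected in isolation before being merged back into env->fp_status.
 */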
   1644
   1645void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
   1646                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
   1647{
   1648    ppc_vsr_t t = *xt;
   1649    float_status tstat;
   1650
   1651    helper_reset_fpstatus(env);
   1652
   1653    tstat = env->fp_status;
   1654    if (unlikely(Rc(opcode) != 0)) {
   1655        tstat.float_rounding_mode = float_round_to_odd;
   1656    }
   1657
   1658    set_float_exception_flags(0, &tstat);
   1659    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
   1660    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
   1661
   1662    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
   1663        float_invalid_op_addsub(env, 1, GETPC(),
   1664                                float128_classify(xa->f128) |
   1665                                float128_classify(xb->f128));
   1666    }
   1667
   1668    helper_compute_fprf_float128(env, t.f128);
   1669
   1670    *xt = t;
   1671    do_float_check_status(env, GETPC());
   1672}
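/*
 * The Rc(opcode) test above selects round-to-odd for the "o" form of
 * the quad-precision instruction (e.g. xsaddqpo); the same pattern
 * appears again in the xsmulqp and xsdivqp helpers below.  Round-to-odd
 * preserves the sticky information in the low result bit so that a
 * later rounding to a narrower format cannot double-round.
 */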
   1673
   1674/*
   1675 * VSX_MUL - VSX floating point multiply
   1676 *   op    - instruction mnemonic
   1677 *   nels  - number of elements (1, 2 or 4)
   1678 *   tp    - type (float32 or float64)
   1679 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   1680 *   sfprf - set FPRF
   1681 */
   1682#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
   1683void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
   1684                 ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
   1685{                                                                            \
   1686    ppc_vsr_t t = *xt;                                                       \
   1687    int i;                                                                   \
   1688                                                                             \
   1689    helper_reset_fpstatus(env);                                              \
   1690                                                                             \
   1691    for (i = 0; i < nels; i++) {                                             \
   1692        float_status tstat = env->fp_status;                                 \
   1693        set_float_exception_flags(0, &tstat);                                \
   1694        t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                          \
   1695        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
   1696                                                                             \
   1697        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
   1698            float_invalid_op_mul(env, sfprf, GETPC(),                        \
   1699                                 tp##_classify(xa->fld) |                    \
   1700                                 tp##_classify(xb->fld));                    \
   1701        }                                                                    \
   1702                                                                             \
   1703        if (r2sp) {                                                          \
   1704            t.fld = helper_frsp(env, t.fld);                                 \
   1705        }                                                                    \
   1706                                                                             \
   1707        if (sfprf) {                                                         \
   1708            helper_compute_fprf_float64(env, t.fld);                         \
   1709        }                                                                    \
   1710    }                                                                        \
   1711                                                                             \
   1712    *xt = t;                                                                 \
   1713    do_float_check_status(env, GETPC());                                     \
   1714}
   1715
   1716VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
   1717VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
   1718VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
   1719VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
   1720
   1721void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
   1722                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
   1723{
   1724    ppc_vsr_t t = *xt;
   1725    float_status tstat;
   1726
   1727    helper_reset_fpstatus(env);
   1728    tstat = env->fp_status;
   1729    if (unlikely(Rc(opcode) != 0)) {
   1730        tstat.float_rounding_mode = float_round_to_odd;
   1731    }
   1732
   1733    set_float_exception_flags(0, &tstat);
   1734    t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
   1735    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
   1736
   1737    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
   1738        float_invalid_op_mul(env, 1, GETPC(),
   1739                             float128_classify(xa->f128) |
   1740                             float128_classify(xb->f128));
   1741    }
   1742    helper_compute_fprf_float128(env, t.f128);
   1743
   1744    *xt = t;
   1745    do_float_check_status(env, GETPC());
   1746}
   1747
   1748/*
   1749 * VSX_DIV - VSX floating point divide
   1750 *   op    - instruction mnemonic
   1751 *   nels  - number of elements (1, 2 or 4)
   1752 *   tp    - type (float32 or float64)
   1753 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   1754 *   sfprf - set FPRF
   1755 */
   1756#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
   1757void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
   1758                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
   1759{                                                                             \
   1760    ppc_vsr_t t = *xt;                                                        \
   1761    int i;                                                                    \
   1762                                                                              \
   1763    helper_reset_fpstatus(env);                                               \
   1764                                                                              \
   1765    for (i = 0; i < nels; i++) {                                              \
   1766        float_status tstat = env->fp_status;                                  \
   1767        set_float_exception_flags(0, &tstat);                                 \
   1768        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                           \
   1769        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
   1770                                                                              \
   1771        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
   1772            float_invalid_op_div(env, sfprf, GETPC(),                         \
   1773                                 tp##_classify(xa->fld) |                     \
   1774                                 tp##_classify(xb->fld));                     \
   1775        }                                                                     \
   1776        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {   \
   1777            float_zero_divide_excp(env, GETPC());                             \
   1778        }                                                                     \
   1779                                                                              \
   1780        if (r2sp) {                                                           \
   1781            t.fld = helper_frsp(env, t.fld);                                  \
   1782        }                                                                     \
   1783                                                                              \
   1784        if (sfprf) {                                                          \
   1785            helper_compute_fprf_float64(env, t.fld);                          \
   1786        }                                                                     \
   1787    }                                                                         \
   1788                                                                              \
   1789    *xt = t;                                                                  \
   1790    do_float_check_status(env, GETPC());                                      \
   1791}
   1792
   1793VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
   1794VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
   1795VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
   1796VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
   1797
   1798void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
   1799                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
   1800{
   1801    ppc_vsr_t t = *xt;
   1802    float_status tstat;
   1803
   1804    helper_reset_fpstatus(env);
   1805    tstat = env->fp_status;
   1806    if (unlikely(Rc(opcode) != 0)) {
   1807        tstat.float_rounding_mode = float_round_to_odd;
   1808    }
   1809
   1810    set_float_exception_flags(0, &tstat);
   1811    t.f128 = float128_div(xa->f128, xb->f128, &tstat);
   1812    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
   1813
   1814    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
   1815        float_invalid_op_div(env, 1, GETPC(),
   1816                             float128_classify(xa->f128) |
   1817                             float128_classify(xb->f128));
   1818    }
   1819    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
   1820        float_zero_divide_excp(env, GETPC());
   1821    }
   1822
   1823    helper_compute_fprf_float128(env, t.f128);
   1824    *xt = t;
   1825    do_float_check_status(env, GETPC());
   1826}
   1827
   1828/*
   1829 * VSX_RE  - VSX floating point reciprocal estimate
   1830 *   op    - instruction mnemonic
   1831 *   nels  - number of elements (1, 2 or 4)
   1832 *   tp    - type (float32 or float64)
   1833 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   1834 *   sfprf - set FPRF
   1835 */
   1836#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
   1837void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
   1838{                                                                             \
   1839    ppc_vsr_t t = *xt;                                                        \
   1840    int i;                                                                    \
   1841                                                                              \
   1842    helper_reset_fpstatus(env);                                               \
   1843                                                                              \
   1844    for (i = 0; i < nels; i++) {                                              \
   1845        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
   1846            float_invalid_op_vxsnan(env, GETPC());                            \
   1847        }                                                                     \
   1848        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);                 \
   1849                                                                              \
   1850        if (r2sp) {                                                           \
   1851            t.fld = helper_frsp(env, t.fld);                                  \
   1852        }                                                                     \
   1853                                                                              \
   1854        if (sfprf) {                                                          \
   1855            helper_compute_fprf_float64(env, t.fld);                          \
   1856        }                                                                     \
   1857    }                                                                         \
   1858                                                                              \
   1859    *xt = t;                                                                  \
   1860    do_float_check_status(env, GETPC());                                      \
   1861}
   1862
   1863VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
   1864VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
   1865VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
   1866VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
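/*
 * Note that the "estimate" above is produced with a full-precision
 * divide (one / x) rather than a table-based approximation; an exact
 * reciprocal trivially meets the estimate's accuracy requirement.  The
 * reciprocal square root estimate further below takes the same
 * shortcut, computing 1 / sqrt(x) with full-precision operations.
 */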
   1867
   1868/*
   1869 * VSX_SQRT - VSX floating point square root
   1870 *   op    - instruction mnemonic
   1871 *   nels  - number of elements (1, 2 or 4)
   1872 *   tp    - type (float32 or float64)
   1873 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   1874 *   sfprf - set FPRF
   1875 */
   1876#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
   1877void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
   1878{                                                                            \
   1879    ppc_vsr_t t = *xt;                                                       \
   1880    int i;                                                                   \
   1881                                                                             \
   1882    helper_reset_fpstatus(env);                                              \
   1883                                                                             \
   1884    for (i = 0; i < nels; i++) {                                             \
   1885        float_status tstat = env->fp_status;                                 \
   1886        set_float_exception_flags(0, &tstat);                                \
   1887        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
   1888        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
   1889                                                                             \
   1890        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
   1891            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
   1892                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
   1893            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
   1894                float_invalid_op_vxsnan(env, GETPC());                       \
   1895            }                                                                \
   1896        }                                                                    \
   1897                                                                             \
   1898        if (r2sp) {                                                          \
   1899            t.fld = helper_frsp(env, t.fld);                                 \
   1900        }                                                                    \
   1901                                                                             \
   1902        if (sfprf) {                                                         \
   1903            helper_compute_fprf_float64(env, t.fld);                         \
   1904        }                                                                    \
   1905    }                                                                        \
   1906                                                                             \
   1907    *xt = t;                                                                 \
   1908    do_float_check_status(env, GETPC());                                     \
   1909}
   1910
   1911VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
   1912VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
   1913VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
   1914VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
   1915
   1916/*
   1917 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
   1918 *   op    - instruction mnemonic
   1919 *   nels  - number of elements (1, 2 or 4)
   1920 *   tp    - type (float32 or float64)
   1921 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   1922 *   sfprf - set FPRF
   1923 */
   1924#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
   1925void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
   1926{                                                                            \
   1927    ppc_vsr_t t = *xt;                                                       \
   1928    int i;                                                                   \
   1929                                                                             \
   1930    helper_reset_fpstatus(env);                                              \
   1931                                                                             \
   1932    for (i = 0; i < nels; i++) {                                             \
   1933        float_status tstat = env->fp_status;                                 \
   1934        set_float_exception_flags(0, &tstat);                                \
   1935        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
   1936        t.fld = tp##_div(tp##_one, t.fld, &tstat);                           \
   1937        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
   1938                                                                             \
   1939        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
   1940            if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) {            \
   1941                float_invalid_op_vxsqrt(env, sfprf, GETPC());                \
   1942            } else if (tp##_is_signaling_nan(xb->fld, &tstat)) {             \
   1943                float_invalid_op_vxsnan(env, GETPC());                       \
   1944            }                                                                \
   1945        }                                                                    \
   1946                                                                             \
   1947        if (r2sp) {                                                          \
   1948            t.fld = helper_frsp(env, t.fld);                                 \
   1949        }                                                                    \
   1950                                                                             \
   1951        if (sfprf) {                                                         \
   1952            helper_compute_fprf_float64(env, t.fld);                         \
   1953        }                                                                    \
   1954    }                                                                        \
   1955                                                                             \
   1956    *xt = t;                                                                 \
   1957    do_float_check_status(env, GETPC());                                     \
   1958}
   1959
   1960VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
   1961VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
   1962VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
   1963VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
   1964
   1965/*
   1966 * VSX_TDIV - VSX floating point test for divide
   1967 *   op    - instruction mnemonic
   1968 *   nels  - number of elements (1, 2 or 4)
   1969 *   tp    - type (float32 or float64)
   1970 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   1971 *   emin  - minimum unbiased exponent
   1972 *   emax  - maximum unbiased exponent
   1973 *   nbits - number of fraction bits
   1974 */
   1975#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
   1976void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
   1977                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
   1978{                                                                       \
   1979    int i;                                                              \
   1980    int fe_flag = 0;                                                    \
   1981    int fg_flag = 0;                                                    \
   1982                                                                        \
   1983    for (i = 0; i < nels; i++) {                                        \
   1984        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
   1985                     tp##_is_infinity(xb->fld) ||                       \
   1986                     tp##_is_zero(xb->fld))) {                          \
   1987            fe_flag = 1;                                                \
   1988            fg_flag = 1;                                                \
   1989        } else {                                                        \
   1990            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
   1991            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
   1992                                                                        \
   1993            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
   1994                         tp##_is_any_nan(xb->fld))) {                   \
   1995                fe_flag = 1;                                            \
   1996            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
   1997                fe_flag = 1;                                            \
   1998            } else if (!tp##_is_zero(xa->fld) &&                        \
   1999                       (((e_a - e_b) >= emax) ||                        \
   2000                        ((e_a - e_b) <= (emin + 1)) ||                  \
   2001                        (e_a <= (emin + nbits)))) {                     \
   2002                fe_flag = 1;                                            \
   2003            }                                                           \
   2004                                                                        \
   2005            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
   2006                /*                                                      \
   2007                 * XB is not zero because of the above check and so     \
   2008                 * must be denormalized.                                \
   2009                 */                                                     \
   2010                fg_flag = 1;                                            \
   2011            }                                                           \
   2012        }                                                               \
   2013    }                                                                   \
   2014                                                                        \
   2015    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
   2016}
   2017
   2018VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
   2019VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
   2020VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
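/*
 * The CR field written above packs the two software-test flags as
 * 0b1000 | fg << 2 | fe << 1, i.e. the most significant bit of the
 * field is always set and the least significant bit is always clear;
 * VSX_TSQRT below uses the same encoding.  (The names follow the
 * code's fe_flag/fg_flag variables rather than the ISA mnemonics.)
 */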
   2021
   2022/*
   2023 * VSX_TSQRT - VSX floating point test for square root
   2024 *   op    - instruction mnemonic
   2025 *   nels  - number of elements (1, 2 or 4)
   2026 *   tp    - type (float32 or float64)
   2027 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   2028 *   emin  - minimum unbiased exponent
   2030 *   nbits - number of fraction bits
   2031 */
   2032#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
   2033void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)      \
   2034{                                                                       \
   2035    int i;                                                              \
   2036    int fe_flag = 0;                                                    \
   2037    int fg_flag = 0;                                                    \
   2038                                                                        \
   2039    for (i = 0; i < nels; i++) {                                        \
   2040        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
   2041                     tp##_is_zero(xb->fld))) {                          \
   2042            fe_flag = 1;                                                \
   2043            fg_flag = 1;                                                \
   2044        } else {                                                        \
   2045            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
   2046                                                                        \
   2047            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
   2048                fe_flag = 1;                                            \
   2049            } else if (unlikely(tp##_is_zero(xb->fld))) {               \
   2050                fe_flag = 1;                                            \
   2051            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
   2052                fe_flag = 1;                                            \
   2053            } else if (!tp##_is_zero(xb->fld) &&                        \
   2054                       (e_b <= (emin + nbits))) {                       \
   2055                fe_flag = 1;                                            \
   2056            }                                                           \
   2057                                                                        \
   2058            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
   2059                /*                                                      \
   2060                 * XB is not zero because of the above check and        \
   2061                 * therefore must be denormalized.                      \
   2062                 */                                                     \
   2063                fg_flag = 1;                                            \
   2064            }                                                           \
   2065        }                                                               \
   2066    }                                                                   \
   2067                                                                        \
   2068    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
   2069}
   2070
   2071VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
   2072VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
   2073VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
   2074
   2075/*
   2076 * VSX_MADD - VSX floating point multiply/add variations
   2077 *   op    - instruction mnemonic
   2078 *   nels  - number of elements (1, 2 or 4)
   2079 *   tp    - type (float32 or float64)
   2080 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   2081 *   maddflgs - flags for the float*muladd routine that control the
   2082 *           various forms (madd, msub, nmadd, nmsub)
   2083 *   sfprf - set FPRF
   2084 */
   2085#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp)                    \
   2086void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
   2087                 ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c)                   \
   2088{                                                                             \
   2089    ppc_vsr_t t = *xt;                                                        \
   2090    int i;                                                                    \
   2091                                                                              \
   2092    helper_reset_fpstatus(env);                                               \
   2093                                                                              \
   2094    for (i = 0; i < nels; i++) {                                              \
   2095        float_status tstat = env->fp_status;                                  \
   2096        set_float_exception_flags(0, &tstat);                                 \
   2097        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
   2098            /*                                                                \
   2099             * Avoid double rounding errors by rounding the intermediate      \
   2100             * result to odd.                                                 \
   2101             */                                                               \
   2102            set_float_rounding_mode(float_round_to_zero, &tstat);             \
   2103            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
   2104                                maddflgs, &tstat);                            \
   2105            t.fld |= (get_float_exception_flags(&tstat) &                     \
   2106                      float_flag_inexact) != 0;                               \
   2107        } else {                                                              \
   2108            t.fld = tp##_muladd(xa->fld, b->fld, c->fld,                      \
   2109                                maddflgs, &tstat);                            \
   2110        }                                                                     \
   2111        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
   2112                                                                              \
   2113        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
   2114            tp##_maddsub_update_excp(env, xa->fld, b->fld,                    \
   2115                                     c->fld, maddflgs, GETPC());              \
   2116        }                                                                     \
   2117                                                                              \
   2118        if (r2sp) {                                                           \
   2119            t.fld = helper_frsp(env, t.fld);                                  \
   2120        }                                                                     \
   2121                                                                              \
   2122        if (sfprf) {                                                          \
   2123            helper_compute_fprf_float64(env, t.fld);                          \
   2124        }                                                                     \
   2125    }                                                                         \
   2126    *xt = t;                                                                  \
   2127    do_float_check_status(env, GETPC());                                      \
   2128}
   2129
   2130VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
   2131VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
   2132VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
   2133VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
   2134VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
   2135VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
   2136VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
   2137VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)
   2138
   2139VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
   2140VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
   2141VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0)
   2142VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0)
   2143
   2144VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0)
   2145VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
   2146VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
   2147VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
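/*
 * A note on the r2sp (single-precision) forms above: when the rounding
 * mode is round-to-nearest-even, the fused intermediate result is
 * computed with round-to-zero and the inexact flag is then OR-ed into
 * the low bit of the raw float64 pattern, which amounts to round-to-odd
 * done by hand, so the subsequent helper_frsp() rounding to single
 * precision cannot double-round.
 */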
   2148
   2149/*
   2150 * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
   2151 *   op    - instruction mnemonic
   2152 *   cmp   - comparison operation
   2153 *   exp   - expected result of comparison
   2154 *   svxvc - set VXVC bit
   2155 */
   2156#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
   2157void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
   2158                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
   2159{                                                                             \
   2160    ppc_vsr_t t = *xt;                                                        \
   2161    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
   2162                                                                              \
   2163    if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||             \
   2164        float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {             \
   2165        vxsnan_flag = true;                                                   \
   2166        if (fpscr_ve == 0 && svxvc) {                                         \
   2167            vxvc_flag = true;                                                 \
   2168        }                                                                     \
   2169    } else if (svxvc) {                                                       \
   2170        vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||     \
   2171            float64_is_quiet_nan(xb->VsrD(0), &env->fp_status);               \
   2172    }                                                                         \
   2173    if (vxsnan_flag) {                                                        \
   2174        float_invalid_op_vxsnan(env, GETPC());                                \
   2175    }                                                                         \
   2176    if (vxvc_flag) {                                                          \
   2177        float_invalid_op_vxvc(env, 0, GETPC());                               \
   2178    }                                                                         \
   2179    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
   2180                                                                              \
   2181    if (!vex_flag) {                                                          \
   2182        if (float64_##cmp(xb->VsrD(0), xa->VsrD(0),                           \
   2183                          &env->fp_status) == exp) {                          \
   2184            t.VsrD(0) = -1;                                                   \
   2185            t.VsrD(1) = 0;                                                    \
   2186        } else {                                                              \
   2187            t.VsrD(0) = 0;                                                    \
   2188            t.VsrD(1) = 0;                                                    \
   2189        }                                                                     \
   2190    }                                                                         \
   2191    *xt = t;                                                                  \
   2192    do_float_check_status(env, GETPC());                                      \
   2193}
   2194
   2195VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
   2196VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
   2197VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
   2198VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
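/*
 * Because the comparison above is written as float64_<cmp>(xb, xa) == exp,
 * the instantiations read as follows (a paraphrase of the macro, not of
 * the ISA wording):
 *
 *   xscmpeqdp:  eq(xb, xa) == 1   ->  xa == xb
 *   xscmpgedp:  le(xb, xa) == 1   ->  xa >= xb
 *   xscmpgtdp:  lt(xb, xa) == 1   ->  xa >  xb
 *   xscmpnedp:  eq(xb, xa) == 0   ->  xa != xb (true for unordered too)
 */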
   2199
   2200void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
   2201                       ppc_vsr_t *xa, ppc_vsr_t *xb)
   2202{
   2203    int64_t exp_a, exp_b;
   2204    uint32_t cc;
   2205
   2206    exp_a = extract64(xa->VsrD(0), 52, 11);
   2207    exp_b = extract64(xb->VsrD(0), 52, 11);
   2208
   2209    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
   2210                 float64_is_any_nan(xb->VsrD(0)))) {
   2211        cc = CRF_SO;
   2212    } else {
   2213        if (exp_a < exp_b) {
   2214            cc = CRF_LT;
   2215        } else if (exp_a > exp_b) {
   2216            cc = CRF_GT;
   2217        } else {
   2218            cc = CRF_EQ;
   2219        }
   2220    }
   2221
   2222    env->fpscr &= ~FP_FPCC;
   2223    env->fpscr |= cc << FPSCR_FPCC;
   2224    env->crf[BF(opcode)] = cc;
   2225
   2226    do_float_check_status(env, GETPC());
   2227}
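/*
 * extract64(x, 52, 11) above pulls the 11-bit biased exponent straight
 * out of the double-precision bit pattern (bits 62:52); the
 * quad-precision variant below does the same for the 15-bit exponent
 * held in bits 62:48 of the most significant doubleword.
 */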
   2228
   2229void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
   2230                       ppc_vsr_t *xa, ppc_vsr_t *xb)
   2231{
   2232    int64_t exp_a, exp_b;
   2233    uint32_t cc;
   2234
   2235    exp_a = extract64(xa->VsrD(0), 48, 15);
   2236    exp_b = extract64(xb->VsrD(0), 48, 15);
   2237
   2238    if (unlikely(float128_is_any_nan(xa->f128) ||
   2239                 float128_is_any_nan(xb->f128))) {
   2240        cc = CRF_SO;
   2241    } else {
   2242        if (exp_a < exp_b) {
   2243            cc = CRF_LT;
   2244        } else if (exp_a > exp_b) {
   2245            cc = CRF_GT;
   2246        } else {
   2247            cc = CRF_EQ;
   2248        }
   2249    }
   2250
   2251    env->fpscr &= ~FP_FPCC;
   2252    env->fpscr |= cc << FPSCR_FPCC;
   2253    env->crf[BF(opcode)] = cc;
   2254
   2255    do_float_check_status(env, GETPC());
   2256}
   2257
   2258static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
   2259                                 int crf_idx, bool ordered)
   2260{
   2261    uint32_t cc;
   2262    bool vxsnan_flag = false, vxvc_flag = false;
   2263
   2264    helper_reset_fpstatus(env);
   2265
   2266    switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {
   2267    case float_relation_less:
   2268        cc = CRF_LT;
   2269        break;
   2270    case float_relation_equal:
   2271        cc = CRF_EQ;
   2272        break;
   2273    case float_relation_greater:
   2274        cc = CRF_GT;
   2275        break;
   2276    case float_relation_unordered:
   2277        cc = CRF_SO;
   2278
   2279        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
   2280            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
   2281            vxsnan_flag = true;
   2282            if (fpscr_ve == 0 && ordered) {
   2283                vxvc_flag = true;
   2284            }
   2285        } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
   2286                   float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {
   2287            if (ordered) {
   2288                vxvc_flag = true;
   2289            }
   2290        }
   2291
   2292        break;
   2293    default:
   2294        g_assert_not_reached();
   2295    }
   2296
   2297    env->fpscr &= ~FP_FPCC;
   2298    env->fpscr |= cc << FPSCR_FPCC;
   2299    env->crf[crf_idx] = cc;
   2300
   2301    if (vxsnan_flag) {
   2302        float_invalid_op_vxsnan(env, GETPC());
   2303    }
   2304    if (vxvc_flag) {
   2305        float_invalid_op_vxvc(env, 0, GETPC());
   2306    }
   2307
   2308    do_float_check_status(env, GETPC());
   2309}
   2310
   2311void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
   2312                     ppc_vsr_t *xb)
   2313{
   2314    do_scalar_cmp(env, xa, xb, BF(opcode), true);
   2315}
   2316
   2317void helper_xscmpudp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
   2318                     ppc_vsr_t *xb)
   2319{
   2320    do_scalar_cmp(env, xa, xb, BF(opcode), false);
   2321}
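/*
 * The ordered/unordered distinction only affects the exception
 * bookkeeping: both forms set the CR field and FPSCR.FPCC from the
 * comparison result, but the ordered compare (xscmpodp) additionally
 * raises VXVC when the operands are unordered because of a quiet NaN,
 * or because of a signalling NaN with VE clear, while the unordered
 * compare (xscmpudp) only reports VXSNAN.  The quad-precision pair
 * below follows the same scheme.
 */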
   2322
   2323static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
   2324                                  ppc_vsr_t *xb, int crf_idx, bool ordered)
   2325{
   2326    uint32_t cc;
   2327    bool vxsnan_flag = false, vxvc_flag = false;
   2328
   2329    helper_reset_fpstatus(env);
   2330
   2331    switch (float128_compare(xa->f128, xb->f128, &env->fp_status)) {
   2332    case float_relation_less:
   2333        cc = CRF_LT;
   2334        break;
   2335    case float_relation_equal:
   2336        cc = CRF_EQ;
   2337        break;
   2338    case float_relation_greater:
   2339        cc = CRF_GT;
   2340        break;
   2341    case float_relation_unordered:
   2342        cc = CRF_SO;
   2343
   2344        if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
   2345            float128_is_signaling_nan(xb->f128, &env->fp_status)) {
   2346            vxsnan_flag = true;
   2347            if (fpscr_ve == 0 && ordered) {
   2348                vxvc_flag = true;
   2349            }
   2350        } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
   2351                   float128_is_quiet_nan(xb->f128, &env->fp_status)) {
   2352            if (ordered) {
   2353                vxvc_flag = true;
   2354            }
   2355        }
   2356
   2357        break;
   2358    default:
   2359        g_assert_not_reached();
   2360    }
   2361
   2362    env->fpscr &= ~FP_FPCC;
   2363    env->fpscr |= cc << FPSCR_FPCC;
   2364    env->crf[crf_idx] = cc;
   2365
   2366    if (vxsnan_flag) {
   2367        float_invalid_op_vxsnan(env, GETPC());
   2368    }
   2369    if (vxvc_flag) {
   2370        float_invalid_op_vxvc(env, 0, GETPC());
   2371    }
   2372
   2373    do_float_check_status(env, GETPC());
   2374}
   2375
   2376void helper_xscmpoqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
   2377                     ppc_vsr_t *xb)
   2378{
   2379    do_scalar_cmpq(env, xa, xb, BF(opcode), true);
   2380}
   2381
   2382void helper_xscmpuqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
   2383                     ppc_vsr_t *xb)
   2384{
   2385    do_scalar_cmpq(env, xa, xb, BF(opcode), false);
   2386}
   2387
   2388/*
   2389 * VSX_MAX_MIN - VSX floating point maximum/minimum
   2390 *   name  - instruction mnemonic
   2391 *   op    - operation (max or min)
   2392 *   nels  - number of elements (1, 2 or 4)
   2393 *   tp    - type (float32 or float64)
   2394 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   2395 */
   2396#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
   2397void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                           \
   2398                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
   2399{                                                                             \
   2400    ppc_vsr_t t = *xt;                                                        \
   2401    int i;                                                                    \
   2402                                                                              \
   2403    for (i = 0; i < nels; i++) {                                              \
   2404        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status);                 \
   2405        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
   2406                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
   2407            float_invalid_op_vxsnan(env, GETPC());                            \
   2408        }                                                                     \
   2409    }                                                                         \
   2410                                                                              \
   2411    *xt = t;                                                                  \
   2412    do_float_check_status(env, GETPC());                                      \
   2413}
   2414
   2415VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
   2416VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
   2417VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
   2418VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
   2419VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
   2420VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
   2421
   2422#define VSX_MAX_MINC(name, max)                                               \
   2423void helper_##name(CPUPPCState *env, uint32_t opcode,                         \
   2424                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
   2425{                                                                             \
   2426    ppc_vsr_t t = *xt;                                                        \
   2427    bool vxsnan_flag = false, vex_flag = false;                               \
   2428                                                                              \
   2429    if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||                           \
   2430                 float64_is_any_nan(xb->VsrD(0)))) {                          \
   2431        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||         \
   2432            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
   2433            vxsnan_flag = true;                                               \
   2434        }                                                                     \
   2435        t.VsrD(0) = xb->VsrD(0);                                              \
   2436    } else if ((max &&                                                        \
   2437               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
   2438               (!max &&                                                       \
   2439               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
   2440        t.VsrD(0) = xa->VsrD(0);                                              \
   2441    } else {                                                                  \
   2442        t.VsrD(0) = xb->VsrD(0);                                              \
   2443    }                                                                         \
   2444                                                                              \
   2445    vex_flag = fpscr_ve & vxsnan_flag;                                        \
   2446    if (vxsnan_flag) {                                                        \
   2447        float_invalid_op_vxsnan(env, GETPC());                                \
   2448    }                                                                         \
   2449    if (!vex_flag) {                                                          \
   2450        *xt = t;                                                              \
   2451    }                                                                         \
   2452}                                                                             \
   2453
   2454VSX_MAX_MINC(xsmaxcdp, 1);
   2455VSX_MAX_MINC(xsmincdp, 0);
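/*
 * This "type-C" variant keeps things simple: any NaN in either operand
 * selects xb (after the usual SNaN reporting), giving roughly the
 * semantics of the C expression (a > b) ? a : b, zero signs aside.
 * The VSX_MAX_MINJ variant below adds explicit handling for the signs
 * of zero operands.
 */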
   2456
   2457#define VSX_MAX_MINJ(name, max)                                               \
   2458void helper_##name(CPUPPCState *env, uint32_t opcode,                         \
   2459                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
   2460{                                                                             \
   2461    ppc_vsr_t t = *xt;                                                        \
   2462    bool vxsnan_flag = false, vex_flag = false;                               \
   2463                                                                              \
   2464    if (unlikely(float64_is_any_nan(xa->VsrD(0)))) {                          \
   2465        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) {         \
   2466            vxsnan_flag = true;                                               \
   2467        }                                                                     \
   2468        t.VsrD(0) = xa->VsrD(0);                                              \
   2469    } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) {                   \
   2470        if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
   2471            vxsnan_flag = true;                                               \
   2472        }                                                                     \
   2473        t.VsrD(0) = xb->VsrD(0);                                              \
   2474    } else if (float64_is_zero(xa->VsrD(0)) &&                                \
   2475               float64_is_zero(xb->VsrD(0))) {                                \
   2476        if (max) {                                                            \
   2477            if (!float64_is_neg(xa->VsrD(0)) ||                               \
   2478                !float64_is_neg(xb->VsrD(0))) {                               \
   2479                t.VsrD(0) = 0ULL;                                             \
   2480            } else {                                                          \
   2481                t.VsrD(0) = 0x8000000000000000ULL;                            \
   2482            }                                                                 \
   2483        } else {                                                              \
   2484            if (float64_is_neg(xa->VsrD(0)) ||                                \
   2485                float64_is_neg(xb->VsrD(0))) {                                \
   2486                t.VsrD(0) = 0x8000000000000000ULL;                            \
   2487            } else {                                                          \
   2488                t.VsrD(0) = 0ULL;                                             \
   2489            }                                                                 \
   2490        }                                                                     \
   2491    } else if ((max &&                                                        \
   2492               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
   2493               (!max &&                                                       \
   2494               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
   2495        t.VsrD(0) = xa->VsrD(0);                                              \
   2496    } else {                                                                  \
   2497        t.VsrD(0) = xb->VsrD(0);                                              \
   2498    }                                                                         \
   2499                                                                              \
   2500    vex_flag = fpscr_ve & vxsnan_flag;                                        \
   2501    if (vxsnan_flag) {                                                        \
   2502        float_invalid_op_vxsnan(env, GETPC());                                \
   2503    }                                                                         \
   2504    if (!vex_flag) {                                                          \
   2505        *xt = t;                                                              \
   2506    }                                                                         \
    2507}
   2508
   2509VSX_MAX_MINJ(xsmaxjdp, 1);
   2510VSX_MAX_MINJ(xsminjdp, 0);
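
        /*
         * Illustrative sketch (not part of the original helpers): the
         * signed-zero rule encoded above, expressed on raw float64 bit
         * patterns.  For xsmaxjdp a +0 operand wins, so -0 results only
         * when both inputs are -0; for xsminjdp a -0 operand wins.
         */
        static inline uint64_t maxminj_zero_rule(uint64_t a, uint64_t b,
                                                 bool is_max)
        {
            const uint64_t minus_zero = 0x8000000000000000ULL;
            bool a_neg = (a & minus_zero) != 0;
            bool b_neg = (b & minus_zero) != 0;

            if (is_max) {
                return (!a_neg || !b_neg) ? 0ULL : minus_zero;
            }
            return (a_neg || b_neg) ? minus_zero : 0ULL;
        }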
   2511
   2512/*
   2513 * VSX_CMP - VSX floating point compare
   2514 *   op    - instruction mnemonic
   2515 *   nels  - number of elements (1, 2 or 4)
   2516 *   tp    - type (float32 or float64)
   2517 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   2518 *   cmp   - comparison operation
   2519 *   svxvc - set VXVC bit
   2520 *   exp   - expected result of comparison
   2521 */
   2522#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
   2523uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
   2524                     ppc_vsr_t *xa, ppc_vsr_t *xb)                        \
   2525{                                                                         \
   2526    ppc_vsr_t t = *xt;                                                    \
   2527    uint32_t crf6 = 0;                                                    \
   2528    int i;                                                                \
   2529    int all_true = 1;                                                     \
   2530    int all_false = 1;                                                    \
   2531                                                                          \
   2532    for (i = 0; i < nels; i++) {                                          \
   2533        if (unlikely(tp##_is_any_nan(xa->fld) ||                          \
   2534                     tp##_is_any_nan(xb->fld))) {                         \
   2535            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) ||        \
   2536                tp##_is_signaling_nan(xb->fld, &env->fp_status)) {        \
   2537                float_invalid_op_vxsnan(env, GETPC());                    \
   2538            }                                                             \
   2539            if (svxvc) {                                                  \
   2540                float_invalid_op_vxvc(env, 0, GETPC());                   \
   2541            }                                                             \
   2542            t.fld = 0;                                                    \
   2543            all_true = 0;                                                 \
   2544        } else {                                                          \
   2545            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) {   \
   2546                t.fld = -1;                                               \
   2547                all_false = 0;                                            \
   2548            } else {                                                      \
   2549                t.fld = 0;                                                \
   2550                all_true = 0;                                             \
   2551            }                                                             \
   2552        }                                                                 \
   2553    }                                                                     \
   2554                                                                          \
   2555    *xt = t;                                                              \
   2556    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);                  \
   2557    return crf6;                                                          \
   2558}
   2559
   2560VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
   2561VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
   2562VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
   2563VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
   2564VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
   2565VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
   2566VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
   2567VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
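
        /*
         * Illustrative sketch (not part of the original macro): how the
         * vector compares above fold the per-element results into the
         * 4-bit CR6 value returned to the translator -- bit 3 set when
         * every element compared true, bit 1 set when every element
         * compared false, both clear for a mixed result.  elem_mask is a
         * hypothetical array of the all-ones/all-zeroes element results.
         */
        static inline uint32_t vsx_cmp_fold_crf6(const uint64_t *elem_mask,
                                                 int nels)
        {
            bool all_true = true, all_false = true;
            int i;

            for (i = 0; i < nels; i++) {
                if (elem_mask[i]) {
                    all_false = false;
                } else {
                    all_true = false;
                }
            }
            return (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);
        }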
   2568
   2569/*
   2570 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
   2571 *   op    - instruction mnemonic
   2572 *   nels  - number of elements (1, 2 or 4)
   2573 *   stp   - source type (float32 or float64)
   2574 *   ttp   - target type (float32 or float64)
   2575 *   sfld  - source vsr_t field
    2576 *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
   2577 *   sfprf - set FPRF
   2578 */
   2579#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
   2580void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
   2581{                                                                  \
   2582    ppc_vsr_t t = *xt;                                             \
   2583    int i;                                                         \
   2584                                                                   \
   2585    for (i = 0; i < nels; i++) {                                   \
   2586        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);        \
   2587        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
   2588                                            &env->fp_status))) {   \
   2589            float_invalid_op_vxsnan(env, GETPC());                 \
   2590            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
   2591        }                                                          \
   2592        if (sfprf) {                                               \
   2593            helper_compute_fprf_##ttp(env, t.tfld);                \
   2594        }                                                          \
   2595    }                                                              \
   2596                                                                   \
   2597    *xt = t;                                                       \
   2598    do_float_check_status(env, GETPC());                           \
   2599}
   2600
   2601VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
   2602VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
   2603VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
   2604VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
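
        /*
         * Note on the vector forms above: xvcvdpsp writes each converted
         * single into word element 2 * i (word elements 0 and 2 of the
         * target), and xvcvspdp reads its sources from the same
         * even-numbered word elements.
         */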
   2605
   2606/*
   2607 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
   2608 *   op    - instruction mnemonic
   2609 *   nels  - number of elements (1, 2 or 4)
   2610 *   stp   - source type (float32 or float64)
    2611 *   ttp   - target type (float32, float64 or float128)
   2612 *   sfld  - source vsr_t field
    2613 *   tfld  - target vsr_t field (f32, f64 or f128)
   2614 *   sfprf - set FPRF
   2615 */
   2616#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
   2617void helper_##op(CPUPPCState *env, uint32_t opcode,                       \
   2618                 ppc_vsr_t *xt, ppc_vsr_t *xb)                            \
   2619{                                                                       \
   2620    ppc_vsr_t t = *xt;                                                  \
   2621    int i;                                                              \
   2622                                                                        \
   2623    for (i = 0; i < nels; i++) {                                        \
   2624        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
   2625        if (unlikely(stp##_is_signaling_nan(xb->sfld,                   \
   2626                                            &env->fp_status))) {        \
   2627            float_invalid_op_vxsnan(env, GETPC());                      \
   2628            t.tfld = ttp##_snan_to_qnan(t.tfld);                        \
   2629        }                                                               \
   2630        if (sfprf) {                                                    \
   2631            helper_compute_fprf_##ttp(env, t.tfld);                     \
   2632        }                                                               \
   2633    }                                                                   \
   2634                                                                        \
   2635    *xt = t;                                                            \
   2636    do_float_check_status(env, GETPC());                                \
   2637}
   2638
   2639VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
   2640
   2641/*
   2642 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
   2643 *                       involving one half precision value
   2644 *   op    - instruction mnemonic
   2645 *   nels  - number of elements (1, 2 or 4)
   2646 *   stp   - source type
   2647 *   ttp   - target type
   2648 *   sfld  - source vsr_t field
   2649 *   tfld  - target vsr_t field
   2650 *   sfprf - set FPRF
   2651 */
   2652#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
   2653void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
   2654{                                                                  \
   2655    ppc_vsr_t t = { };                                             \
   2656    int i;                                                         \
   2657                                                                   \
   2658    for (i = 0; i < nels; i++) {                                   \
   2659        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status);     \
   2660        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
   2661                                            &env->fp_status))) {   \
   2662            float_invalid_op_vxsnan(env, GETPC());                 \
   2663            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
   2664        }                                                          \
   2665        if (sfprf) {                                               \
   2666            helper_compute_fprf_##ttp(env, t.tfld);                \
   2667        }                                                          \
   2668    }                                                              \
   2669                                                                   \
   2670    *xt = t;                                                       \
   2671    do_float_check_status(env, GETPC());                           \
   2672}
   2673
   2674VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
   2675VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
   2676VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i  + 1), 0)
   2677VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
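
        /*
         * In the vector half-precision forms above, the 16-bit values
         * occupy halfword element 2 * i + 1, i.e. the second halfword of
         * word element i in the register's element numbering; the other
         * halfword of each xvcvsphp target word stays zero because t
         * starts out cleared.
         */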
   2678
   2679/*
   2680 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
   2681 * added to this later.
   2682 */
   2683void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
   2684                     ppc_vsr_t *xt, ppc_vsr_t *xb)
   2685{
   2686    ppc_vsr_t t = { };
   2687    float_status tstat;
   2688
   2689    tstat = env->fp_status;
   2690    if (unlikely(Rc(opcode) != 0)) {
   2691        tstat.float_rounding_mode = float_round_to_odd;
   2692    }
   2693
   2694    t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
   2695    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
   2696    if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
   2697        float_invalid_op_vxsnan(env, GETPC());
   2698        t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
   2699    }
   2700    helper_compute_fprf_float64(env, t.VsrD(0));
   2701
   2702    *xt = t;
   2703    do_float_check_status(env, GETPC());
   2704}
   2705
   2706uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
   2707{
   2708    uint64_t result, sign, exp, frac;
   2709
   2710    float_status tstat = env->fp_status;
   2711    set_float_exception_flags(0, &tstat);
   2712
   2713    sign = extract64(xb, 63,  1);
   2714    exp  = extract64(xb, 52, 11);
   2715    frac = extract64(xb,  0, 52) | 0x10000000000000ULL;
   2716
   2717    if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) {
   2718        /* DP denormal operand.  */
   2719        /* Exponent override to DP min exp.  */
   2720        exp = 1;
   2721        /* Implicit bit override to 0.  */
    2722        frac = deposit64(frac, 52, 1, 0);
   2723    }
   2724
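            /*
             * 897 is the smallest biased DP exponent whose value still
             * fits an SP normal (897 - 1023 = -126, the SP minimum normal
             * exponent); anything smaller has to become an SP denormal,
             * hence the shift below and the 896 (SP min exp - 1) override.
             */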
   2725    if (unlikely(exp < 897 && frac != 0)) {
   2726        /* SP tiny operand.  */
   2727        if (897 - exp > 63) {
   2728            frac = 0;
   2729        } else {
   2730            /* Denormalize until exp = SP min exp.  */
   2731            frac >>= (897 - exp);
   2732        }
   2733        /* Exponent override to SP min exp - 1.  */
   2734        exp = 896;
   2735    }
   2736
   2737    result = sign << 31;
   2738    result |= extract64(exp, 10, 1) << 30;
   2739    result |= extract64(exp, 0, 7) << 23;
   2740    result |= extract64(frac, 29, 23);
   2741
   2742    /* hardware replicates result to both words of the doubleword result.  */
   2743    return (result << 32) | result;
   2744}
   2745
   2746uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
   2747{
   2748    float_status tstat = env->fp_status;
   2749    set_float_exception_flags(0, &tstat);
   2750
   2751    return float32_to_float64(xb >> 32, &tstat);
   2752}
   2753
   2754/*
   2755 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
   2756 *   op    - instruction mnemonic
   2757 *   nels  - number of elements (1, 2 or 4)
   2758 *   stp   - source type (float32 or float64)
   2759 *   ttp   - target type (int32, uint32, int64 or uint64)
   2760 *   sfld  - source vsr_t field
   2761 *   tfld  - target vsr_t field
   2762 *   rnan  - resulting NaN
   2763 */
   2764#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
   2765void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
   2766{                                                                            \
   2767    int all_flags = env->fp_status.float_exception_flags, flags;             \
   2768    ppc_vsr_t t = *xt;                                                       \
   2769    int i;                                                                   \
   2770                                                                             \
   2771    for (i = 0; i < nels; i++) {                                             \
   2772        env->fp_status.float_exception_flags = 0;                            \
   2773        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);  \
   2774        flags = env->fp_status.float_exception_flags;                        \
   2775        if (unlikely(flags & float_flag_invalid)) {                          \
   2776            float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));    \
   2777            t.tfld = rnan;                                                   \
   2778        }                                                                    \
   2779        all_flags |= flags;                                                  \
   2780    }                                                                        \
   2781                                                                             \
   2782    *xt = t;                                                                 \
   2783    env->fp_status.float_exception_flags = all_flags;                        \
   2784    do_float_check_status(env, GETPC());                                     \
   2785}
   2786
   2787VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
   2788                  0x8000000000000000ULL)
   2789VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
   2790                  0x80000000U)
   2791VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
   2792VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
   2793VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
   2794                  0x8000000000000000ULL)
   2795VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
   2796                  0x80000000U)
   2797VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
   2798VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
   2799VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
   2800                  0x8000000000000000ULL)
   2801VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
   2802VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
   2803VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
   2804
   2805/*
   2806 * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
   2807 *   op    - instruction mnemonic
    2808 *   stp   - source type (float32, float64 or float128)
   2809 *   ttp   - target type (int32, uint32, int64 or uint64)
   2810 *   sfld  - source vsr_t field
   2811 *   tfld  - target vsr_t field
   2812 *   rnan  - resulting NaN
   2813 */
   2814#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
   2815void helper_##op(CPUPPCState *env, uint32_t opcode,                          \
   2816                 ppc_vsr_t *xt, ppc_vsr_t *xb)                               \
   2817{                                                                            \
   2818    ppc_vsr_t t = { };                                                       \
   2819                                                                             \
   2820    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);      \
   2821    if (env->fp_status.float_exception_flags & float_flag_invalid) {         \
   2822        float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld));        \
   2823        t.tfld = rnan;                                                       \
   2824    }                                                                        \
   2825                                                                             \
   2826    *xt = t;                                                                 \
   2827    do_float_check_status(env, GETPC());                                     \
   2828}
   2829
   2830VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
   2831                  0x8000000000000000ULL)
   2832
   2833VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
   2834                  0xffffffff80000000ULL)
   2835VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
   2836VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
   2837
   2838/*
   2839 * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
   2840 *   op    - instruction mnemonic
   2841 *   nels  - number of elements (1, 2 or 4)
   2842 *   stp   - source type (int32, uint32, int64 or uint64)
   2843 *   ttp   - target type (float32 or float64)
   2844 *   sfld  - source vsr_t field
   2845 *   tfld  - target vsr_t field
    2846 *   sfprf - set FPRF
    2847 *   r2sp  - round the intermediate result to single precision
   2848 */
   2849#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
   2850void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)        \
   2851{                                                                       \
   2852    ppc_vsr_t t = *xt;                                                  \
   2853    int i;                                                              \
   2854                                                                        \
   2855    for (i = 0; i < nels; i++) {                                        \
   2856        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
   2857        if (r2sp) {                                                     \
   2858            t.tfld = helper_frsp(env, t.tfld);                          \
   2859        }                                                               \
   2860        if (sfprf) {                                                    \
   2861            helper_compute_fprf_float64(env, t.tfld);                   \
   2862        }                                                               \
   2863    }                                                                   \
   2864                                                                        \
   2865    *xt = t;                                                            \
   2866    do_float_check_status(env, GETPC());                                \
   2867}
   2868
   2869VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
   2870VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
   2871VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
   2872VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
   2873VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
   2874VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
   2875VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
   2876VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
   2877VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
   2878VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
   2879VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
   2880VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
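
        /*
         * Illustrative sketch (not part of the original macro): the
         * r2sp = 1 instantiations (xscvsxdsp/xscvuxdsp) convert to double
         * precision and then round to single precision while keeping the
         * double-precision format -- roughly the following, with FPSCR
         * rounding mode and exception accounting omitted.
         */
        static inline double cvt_sxd_to_sp_in_dp_format(int64_t v)
        {
            /* int64 -> float64, then a second rounding to float32 width */
            return (double)(float)(double)v;
        }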
   2881
   2882/*
   2883 * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
   2884 *   op    - instruction mnemonic
   2885 *   stp   - source type (int32, uint32, int64 or uint64)
    2886 *   ttp   - target type (float32, float64 or float128)
   2887 *   sfld  - source vsr_t field
   2888 *   tfld  - target vsr_t field
   2889 */
   2890#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
   2891void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
   2892                 ppc_vsr_t *xt, ppc_vsr_t *xb)                          \
   2893{                                                                       \
   2894    ppc_vsr_t t = *xt;                                                  \
   2895                                                                        \
   2896    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                 \
   2897    helper_compute_fprf_##ttp(env, t.tfld);                             \
   2898                                                                        \
   2899    *xt = t;                                                            \
   2900    do_float_check_status(env, GETPC());                                \
   2901}
   2902
   2903VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
   2904VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
   2905
   2906/*
   2907 * For "use current rounding mode", define a value that will not be
    2908 * one of the existing rounding mode enums.
   2909 */
   2910#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
   2911  float_round_up + float_round_to_zero)
   2912
   2913/*
   2914 * VSX_ROUND - VSX floating point round
   2915 *   op    - instruction mnemonic
   2916 *   nels  - number of elements (1, 2 or 4)
   2917 *   tp    - type (float32 or float64)
   2918 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   2919 *   rmode - rounding mode
   2920 *   sfprf - set FPRF
   2921 */
   2922#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
   2923void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)       \
   2924{                                                                      \
   2925    ppc_vsr_t t = *xt;                                                 \
   2926    int i;                                                             \
   2927    FloatRoundMode curr_rounding_mode;                                 \
   2928                                                                       \
   2929    if (rmode != FLOAT_ROUND_CURRENT) {                                \
   2930        curr_rounding_mode = get_float_rounding_mode(&env->fp_status); \
   2931        set_float_rounding_mode(rmode, &env->fp_status);               \
   2932    }                                                                  \
   2933                                                                       \
   2934    for (i = 0; i < nels; i++) {                                       \
   2935        if (unlikely(tp##_is_signaling_nan(xb->fld,                    \
   2936                                           &env->fp_status))) {        \
   2937            float_invalid_op_vxsnan(env, GETPC());                     \
   2938            t.fld = tp##_snan_to_qnan(xb->fld);                        \
   2939        } else {                                                       \
   2940            t.fld = tp##_round_to_int(xb->fld, &env->fp_status);       \
   2941        }                                                              \
   2942        if (sfprf) {                                                   \
   2943            helper_compute_fprf_float64(env, t.fld);                   \
   2944        }                                                              \
   2945    }                                                                  \
   2946                                                                       \
   2947    /*                                                                 \
   2948     * If this is not a "use current rounding mode" instruction,       \
   2949     * then inhibit setting of the XX bit and restore rounding         \
   2950     * mode from FPSCR                                                 \
   2951     */                                                                \
   2952    if (rmode != FLOAT_ROUND_CURRENT) {                                \
   2953        set_float_rounding_mode(curr_rounding_mode, &env->fp_status);  \
   2954        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
   2955    }                                                                  \
   2956                                                                       \
   2957    *xt = t;                                                           \
   2958    do_float_check_status(env, GETPC());                               \
   2959}
   2960
   2961VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
   2962VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
   2963VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
   2964VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
   2965VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
   2966
   2967VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
   2968VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
   2969VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
   2970VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
   2971VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
   2972
   2973VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
   2974VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
   2975VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
   2976VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
   2977VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
   2978
   2979uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
   2980{
   2981    helper_reset_fpstatus(env);
   2982
   2983    uint64_t xt = helper_frsp(env, xb);
   2984
   2985    helper_compute_fprf_float64(env, xt);
   2986    do_float_check_status(env, GETPC());
   2987    return xt;
   2988}
   2989
   2990#define VSX_XXPERM(op, indexed)                                       \
   2991void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
   2992                 ppc_vsr_t *xa, ppc_vsr_t *pcv)                       \
   2993{                                                                     \
   2994    ppc_vsr_t t = *xt;                                                \
   2995    int i, idx;                                                       \
   2996                                                                      \
   2997    for (i = 0; i < 16; i++) {                                        \
   2998        idx = pcv->VsrB(i) & 0x1F;                                    \
   2999        if (indexed) {                                                \
   3000            idx = 31 - idx;                                           \
   3001        }                                                             \
   3002        t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx)                       \
   3003                                : xt->VsrB(idx - 16);                 \
   3004    }                                                                 \
   3005    *xt = t;                                                          \
   3006}
   3007
   3008VSX_XXPERM(xxperm, 0)
   3009VSX_XXPERM(xxpermr, 1)
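
        /*
         * Illustrative sketch (not from the original macro): the byte
         * selection above.  Each permute-control byte indexes a 32-byte
         * source made of xa (bytes 0..15) followed by the unmodified
         * target xt (bytes 16..31); xxpermr complements the 5-bit index
         * first.  src32 is a hypothetical flat copy of that concatenation.
         */
        static inline uint8_t xxperm_select(const uint8_t src32[32],
                                            uint8_t ctl, bool complemented)
        {
            uint8_t idx = ctl & 0x1F;

            if (complemented) {
                idx = 31 - idx;
            }
            return src32[idx];
        }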
   3010
   3011void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
   3012{
   3013    ppc_vsr_t t = { };
   3014    uint32_t exp, i, fraction;
   3015
   3016    for (i = 0; i < 4; i++) {
   3017        exp = (xb->VsrW(i) >> 23) & 0xFF;
   3018        fraction = xb->VsrW(i) & 0x7FFFFF;
   3019        if (exp != 0 && exp != 255) {
   3020            t.VsrW(i) = fraction | 0x00800000;
   3021        } else {
   3022            t.VsrW(i) = fraction;
   3023        }
   3024    }
   3025    *xt = t;
   3026}
   3027
   3028/*
   3029 * VSX_TEST_DC - VSX floating point test data class
   3030 *   op    - instruction mnemonic
   3031 *   nels  - number of elements (1, 2 or 4)
   3032 *   xbn   - VSR register number
   3033 *   tp    - type (float32 or float64)
   3034 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
   3035 *   tfld   - target vsr_t field (VsrD(*) or VsrW(*))
   3036 *   fld_max - target field max
   3037 *   scrf - set result in CR and FPCC
   3038 */
   3039#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
   3040void helper_##op(CPUPPCState *env, uint32_t opcode)         \
   3041{                                                           \
   3042    ppc_vsr_t *xt = &env->vsr[xT(opcode)];                  \
   3043    ppc_vsr_t *xb = &env->vsr[xbn];                         \
   3044    ppc_vsr_t t = { };                                      \
   3045    uint32_t i, sign, dcmx;                                 \
   3046    uint32_t cc, match = 0;                                 \
   3047                                                            \
   3048    if (!scrf) {                                            \
   3049        dcmx = DCMX_XV(opcode);                             \
   3050    } else {                                                \
   3051        t = *xt;                                            \
   3052        dcmx = DCMX(opcode);                                \
   3053    }                                                       \
   3054                                                            \
   3055    for (i = 0; i < nels; i++) {                            \
   3056        sign = tp##_is_neg(xb->fld);                        \
   3057        if (tp##_is_any_nan(xb->fld)) {                     \
   3058            match = extract32(dcmx, 6, 1);                  \
   3059        } else if (tp##_is_infinity(xb->fld)) {             \
   3060            match = extract32(dcmx, 4 + !sign, 1);          \
   3061        } else if (tp##_is_zero(xb->fld)) {                 \
   3062            match = extract32(dcmx, 2 + !sign, 1);          \
   3063        } else if (tp##_is_zero_or_denormal(xb->fld)) {     \
   3064            match = extract32(dcmx, 0 + !sign, 1);          \
   3065        }                                                   \
   3066                                                            \
   3067        if (scrf) {                                         \
   3068            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
   3069            env->fpscr &= ~FP_FPCC;                         \
   3070            env->fpscr |= cc << FPSCR_FPCC;                 \
   3071            env->crf[BF(opcode)] = cc;                      \
   3072        } else {                                            \
   3073            t.tfld = match ? fld_max : 0;                   \
   3074        }                                                   \
   3075        match = 0;                                          \
   3076    }                                                       \
   3077    if (!scrf) {                                            \
   3078        *xt = t;                                            \
   3079    }                                                       \
   3080}
   3081
   3082VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
   3083VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
   3084VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
   3085VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
   3086
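        /*
         * DCMX data-class mask bits decoded above and in helper_xststdcsp
         * below: bit 6 = NaN, bits 5/4 = +/-Infinity, bits 3/2 = +/-Zero,
         * bits 1/0 = +/-Denormal, with the positive class in the upper
         * bit of each pair (hence the "4 + !sign" style indexing).
         */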
   3087void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
   3088{
   3089    uint32_t dcmx, sign, exp;
   3090    uint32_t cc, match = 0, not_sp = 0;
   3091
   3092    dcmx = DCMX(opcode);
   3093    exp = (xb->VsrD(0) >> 52) & 0x7FF;
   3094
   3095    sign = float64_is_neg(xb->VsrD(0));
   3096    if (float64_is_any_nan(xb->VsrD(0))) {
   3097        match = extract32(dcmx, 6, 1);
   3098    } else if (float64_is_infinity(xb->VsrD(0))) {
   3099        match = extract32(dcmx, 4 + !sign, 1);
   3100    } else if (float64_is_zero(xb->VsrD(0))) {
   3101        match = extract32(dcmx, 2 + !sign, 1);
   3102    } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
   3103               (exp > 0 && exp < 0x381)) {
   3104        match = extract32(dcmx, 0 + !sign, 1);
   3105    }
   3106
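            /*
             * The exp < 0x381 (897) test above extends the Denormal class
             * to DP values too small to be SP normals; the single-precision
             * round trip below flags values that are not exactly
             * representable in SP, reported in the CRF_SO_BIT position.
             */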
   3107    not_sp = !float64_eq(xb->VsrD(0),
   3108                         float32_to_float64(
   3109                             float64_to_float32(xb->VsrD(0), &env->fp_status),
   3110                             &env->fp_status), &env->fp_status);
   3111
   3112    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
   3113    env->fpscr &= ~FP_FPCC;
   3114    env->fpscr |= cc << FPSCR_FPCC;
   3115    env->crf[BF(opcode)] = cc;
   3116}
   3117
   3118void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
   3119                   ppc_vsr_t *xt, ppc_vsr_t *xb)
   3120{
   3121    ppc_vsr_t t = { };
   3122    uint8_t r = Rrm(opcode);
   3123    uint8_t ex = Rc(opcode);
   3124    uint8_t rmc = RMC(opcode);
   3125    uint8_t rmode = 0;
   3126    float_status tstat;
   3127
   3128    helper_reset_fpstatus(env);
   3129
   3130    if (r == 0 && rmc == 0) {
   3131        rmode = float_round_ties_away;
   3132    } else if (r == 0 && rmc == 0x3) {
   3133        rmode = fpscr_rn;
   3134    } else if (r == 1) {
   3135        switch (rmc) {
   3136        case 0:
   3137            rmode = float_round_nearest_even;
   3138            break;
   3139        case 1:
   3140            rmode = float_round_to_zero;
   3141            break;
   3142        case 2:
   3143            rmode = float_round_up;
   3144            break;
   3145        case 3:
   3146            rmode = float_round_down;
   3147            break;
   3148        default:
   3149            abort();
   3150        }
   3151    }
   3152
   3153    tstat = env->fp_status;
   3154    set_float_exception_flags(0, &tstat);
   3155    set_float_rounding_mode(rmode, &tstat);
   3156    t.f128 = float128_round_to_int(xb->f128, &tstat);
   3157    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
   3158
   3159    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
   3160        if (float128_is_signaling_nan(xb->f128, &tstat)) {
   3161            float_invalid_op_vxsnan(env, GETPC());
   3162            t.f128 = float128_snan_to_qnan(t.f128);
   3163        }
   3164    }
   3165
   3166    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
   3167        env->fp_status.float_exception_flags &= ~float_flag_inexact;
   3168    }
   3169
   3170    helper_compute_fprf_float128(env, t.f128);
   3171    do_float_check_status(env, GETPC());
   3172    *xt = t;
   3173}
   3174
   3175void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
   3176                    ppc_vsr_t *xt, ppc_vsr_t *xb)
   3177{
   3178    ppc_vsr_t t = { };
   3179    uint8_t r = Rrm(opcode);
   3180    uint8_t rmc = RMC(opcode);
   3181    uint8_t rmode = 0;
   3182    floatx80 round_res;
   3183    float_status tstat;
   3184
   3185    helper_reset_fpstatus(env);
   3186
   3187    if (r == 0 && rmc == 0) {
   3188        rmode = float_round_ties_away;
   3189    } else if (r == 0 && rmc == 0x3) {
   3190        rmode = fpscr_rn;
   3191    } else if (r == 1) {
   3192        switch (rmc) {
   3193        case 0:
   3194            rmode = float_round_nearest_even;
   3195            break;
   3196        case 1:
   3197            rmode = float_round_to_zero;
   3198            break;
   3199        case 2:
   3200            rmode = float_round_up;
   3201            break;
   3202        case 3:
   3203            rmode = float_round_down;
   3204            break;
   3205        default:
   3206            abort();
   3207        }
   3208    }
   3209
   3210    tstat = env->fp_status;
   3211    set_float_exception_flags(0, &tstat);
   3212    set_float_rounding_mode(rmode, &tstat);
   3213    round_res = float128_to_floatx80(xb->f128, &tstat);
   3214    t.f128 = floatx80_to_float128(round_res, &tstat);
   3215    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
   3216
   3217    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
   3218        if (float128_is_signaling_nan(xb->f128, &tstat)) {
   3219            float_invalid_op_vxsnan(env, GETPC());
   3220            t.f128 = float128_snan_to_qnan(t.f128);
   3221        }
   3222    }
   3223
   3224    helper_compute_fprf_float128(env, t.f128);
   3225    *xt = t;
   3226    do_float_check_status(env, GETPC());
   3227}
   3228
   3229void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
   3230                     ppc_vsr_t *xt, ppc_vsr_t *xb)
   3231{
   3232    ppc_vsr_t t = { };
   3233    float_status tstat;
   3234
   3235    helper_reset_fpstatus(env);
   3236
   3237    tstat = env->fp_status;
   3238    if (unlikely(Rc(opcode) != 0)) {
   3239        tstat.float_rounding_mode = float_round_to_odd;
   3240    }
   3241
   3242    set_float_exception_flags(0, &tstat);
   3243    t.f128 = float128_sqrt(xb->f128, &tstat);
   3244    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
   3245
   3246    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
   3247        if (float128_is_signaling_nan(xb->f128, &tstat)) {
   3248            float_invalid_op_vxsnan(env, GETPC());
   3249            t.f128 = float128_snan_to_qnan(xb->f128);
   3250        } else if (float128_is_quiet_nan(xb->f128, &tstat)) {
   3251            t.f128 = xb->f128;
   3252        } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) {
   3253            float_invalid_op_vxsqrt(env, 1, GETPC());
   3254            t.f128 = float128_default_nan(&env->fp_status);
   3255        }
   3256    }
   3257
   3258    helper_compute_fprf_float128(env, t.f128);
   3259    *xt = t;
   3260    do_float_check_status(env, GETPC());
   3261}
   3262
   3263void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
   3264                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
   3265{
   3266    ppc_vsr_t t = *xt;
   3267    float_status tstat;
   3268
   3269    helper_reset_fpstatus(env);
   3270
   3271    tstat = env->fp_status;
   3272    if (unlikely(Rc(opcode) != 0)) {
   3273        tstat.float_rounding_mode = float_round_to_odd;
   3274    }
   3275
   3276    set_float_exception_flags(0, &tstat);
   3277    t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
   3278    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
   3279
   3280    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
   3281        float_invalid_op_addsub(env, 1, GETPC(),
   3282                                float128_classify(xa->f128) |
   3283                                float128_classify(xb->f128));
   3284    }
   3285
   3286    helper_compute_fprf_float128(env, t.f128);
   3287    *xt = t;
   3288    do_float_check_status(env, GETPC());
   3289}