/* test_fp0_arith.S (10976B) */
/*
 * FP0 single-precision arithmetic tests (add.s / sub.s / mul.s /
 * madd.s / msub.s), covering rounding, overflow/underflow to
 * inf/denormal, and NaN propagation on both DFPU and FPU2000 pipelines.
 *
 * NOTE(review): test_op2/test_op3 come from macros.inc (not visible
 * here); the four expected-result values and four expected-FSR values
 * per invocation presumably correspond to the four rounding modes —
 * confirm against the macro definition.
 */
#include "macros.inc"
#include "fpu.h"

test_suite fp0_arith

#if XCHAL_HAVE_FP

/*
 * Load the 32-bit pattern \v into FP register \fr.
 * Clobbers a2.
 */
.macro movfp fr, v
    movi    a2, \v
    wfr     \fr, a2
.endm

/*
 * Assert that FP register \fr holds the bit pattern \r and that the
 * FSR exception flags equal \sr.  On FPU2000 (no DFPU) the FSR flags
 * are expected to stay 0 regardless of \sr.
 * Clobbers a2 and a3.
 */
.macro check_res fr, r, sr
    rfr     a2, \fr
    dump    a2
    movi    a3, \r
    assert  eq, a2, a3
    rur     a2, fsr
#if DFPU
    movi    a3, \sr
    assert  eq, a2, a3
#else
    assert  eqi, a2, 0
#endif
.endm

test add_s
    /* Enable coprocessor 0 before the first FP instruction. */
    movi    a2, 1
    wsr     a2, cpenable

    /* Inexact additions: small operand is partially rounded away. */
    test_op2 add.s, f0, f1, f2, 0x3fc00000, 0x34400000, \
             0x3fc00002, 0x3fc00001, 0x3fc00002, 0x3fc00001, \
             FSR_I, FSR_I, FSR_I, FSR_I
    test_op2 add.s, f3, f4, f5, 0x3fc00000, 0x34a00000, \
             0x3fc00002, 0x3fc00002, 0x3fc00003, 0x3fc00002, \
             FSR_I, FSR_I, FSR_I, FSR_I

    /* MAX_FLOAT + MAX_FLOAT = +inf/MAX_FLOAT */
    test_op2 add.s, f6, f7, f8, 0x7f7fffff, 0x7f7fffff, \
             0x7f800000, 0x7f7fffff, 0x7f800000, 0x7f7fffff, \
             FSR_OI, FSR_OI, FSR_OI, FSR_OI
test_end

test add_s_inf
    /* 1 + +inf = +inf */
    test_op2 add.s, f6, f7, f8, 0x3fc00000, 0x7f800000, \
             0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000, \
             FSR__, FSR__, FSR__, FSR__

    /* +inf + -inf = default NaN */
    test_op2 add.s, f0, f1, f2, 0x7f800000, 0xff800000, \
             0x7fc00000, 0x7fc00000, 0x7fc00000, 0x7fc00000, \
             FSR_V, FSR_V, FSR_V, FSR_V
test_end

#if DFPU
test add_s_nan_dfpu
    /* 1 + QNaN = QNaN */
    test_op2 add.s, f9, f10, f11, 0x3fc00000, 0x7fc00001, \
             0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
             FSR__, FSR__, FSR__, FSR__
    /* 1 + SNaN = QNaN */
    test_op2 add.s, f12, f13, f14, 0x3fc00000, 0x7f800001, \
             0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
             FSR_V, FSR_V, FSR_V, FSR_V

    /* SNaN1 + SNaN2 = QNaN2 */
    test_op2 add.s, f15, f0, f1, 0x7f800001, 0x7fbfffff, \
             0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, \
             FSR_V, FSR_V, FSR_V, FSR_V
    test_op2 add.s, f2, f3, f4, 0x7fbfffff, 0x7f800001, \
             0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
             FSR_V, FSR_V, FSR_V, FSR_V
    /* QNaN1 + SNaN2 = QNaN2 */
    test_op2 add.s, f5, f6, f7, 0x7fc00001, 0x7fbfffff, \
             0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, \
             FSR_V, FSR_V, FSR_V, FSR_V
    /* SNaN1 + QNaN2 = QNaN2 */
    test_op2 add.s, f8, f9, f10, 0x7fbfffff, 0x7fc00001, \
             0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
             FSR_V, FSR_V, FSR_V, FSR_V
test_end
#else
test add_s_nan_fpu2k
    /* 1 + QNaN = QNaN */
    test_op2 add.s, f9, f10, f11, 0x3fc00000, 0x7fc00001, \
             0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
             FSR__, FSR__, FSR__, FSR__
    /* 1 + SNaN = SNaN */
    test_op2 add.s, f12, f13, f14, 0x3fc00000, 0x7f800001, \
             0x7f800001, 0x7f800001, 0x7f800001, 0x7f800001, \
             FSR__, FSR__, FSR__, FSR__
    /* SNaN1 + SNaN2 = SNaN1 */
    test_op2 add.s, f15, f0, f1, 0x7f800001, 0x7fbfffff, \
             0x7f800001, 0x7f800001, 0x7f800001, 0x7f800001, \
             FSR__, FSR__, FSR__, FSR__
    test_op2 add.s, f2, f3, f4, 0x7fbfffff, 0x7f800001, \
             0x7fbfffff, 0x7fbfffff, 0x7fbfffff, 0x7fbfffff, \
             FSR__, FSR__, FSR__, FSR__
    /* QNaN1 + SNaN2 = QNaN1 */
    test_op2 add.s, f5, f6, f7, 0x7fc00001, 0x7fbfffff, \
             0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, \
             FSR__, FSR__, FSR__, FSR__
    /* SNaN1 + QNaN2 = SNaN1 */
    test_op2 add.s, f8, f9, f10, 0x7fbfffff, 0x7fc00001, \
             0x7fbfffff, 0x7fbfffff, 0x7fbfffff, 0x7fbfffff, \
             FSR__, FSR__, FSR__, FSR__
test_end
#endif

test sub_s
    /* Inexact subtractions near 1.0. */
    test_op2 sub.s, f0, f1, f0, 0x3f800001, 0x33800000, \
             0x3f800000, 0x3f800000, 0x3f800001, 0x3f800000, \
             FSR_I, FSR_I, FSR_I, FSR_I
    test_op2 sub.s, f0, f1, f1, 0x3f800002, 0x33800000, \
             0x3f800002, 0x3f800001, 0x3f800002, 0x3f800001, \
             FSR_I, FSR_I, FSR_I, FSR_I

    /* norm - norm = denorm */
    test_op2 sub.s, f6, f7, f8, 0x00800001, 0x00800000, \
             0x00000001, 0x00000001, 0x00000001, 0x00000001, \
             FSR__, FSR__, FSR__, FSR__
test_end

test mul_s
    /* Inexact product just above 1.0. */
    test_op2 mul.s, f0, f1, f2, 0x3f800001, 0x3f800001, \
             0x3f800002, 0x3f800002, 0x3f800003, 0x3f800002, \
             FSR_I, FSR_I, FSR_I, FSR_I
    /* MAX_FLOAT/2 * MAX_FLOAT/2 = +inf/MAX_FLOAT */
    test_op2 mul.s, f6, f7, f8, 0x7f000000, 0x7f000000, \
             0x7f800000, 0x7f7fffff, 0x7f800000, 0x7f7fffff, \
             FSR_OI, FSR_OI, FSR_OI, FSR_OI
    /* min norm * min norm = 0/denorm */
    test_op2 mul.s, f6, f7, f8, 0x00800001, 0x00800000, \
             0x00000000, 0x00000000, 0x00000001, 0x00000000, \
             FSR_UI, FSR_UI, FSR_UI, FSR_UI
    /* inf * 0 = default NaN */
    test_op2 mul.s, f6, f7, f8, 0x7f800000, 0x00000000, \
             0x7fc00000, 0x7fc00000, 0x7fc00000, 0x7fc00000, \
             FSR_V, FSR_V, FSR_V, FSR_V
test_end

test madd_s
    /* 0 + 1.0000001 * 1.0000001: inexact fused result. */
    test_op3 madd.s, f0, f1, f2, f0, 0, 0x3f800001, 0x3f800001, \
             0x3f800002, 0x3f800002, 0x3f800003, 0x3f800002, \
             FSR_I, FSR_I, FSR_I, FSR_I
test_end

test madd_s_precision
    /*
     * -1.0000002 + 1.0000001 * 1.0000001 keeps the low product bits,
     * exposing whether the implementation really fuses the multiply-add.
     */
    test_op3 madd.s, f0, f1, f2, f0, 0xbf800002, 0x3f800001, 0x3f800001, \
             0x28800000, 0x28800000, 0x28800000, 0x28800000, \
             FSR__, FSR__, FSR__, FSR__
test_end

#if DFPU
test madd_s_nan_dfpu
    /* DFPU madd/msub NaN1, NaN2, NaN3 priority: NaN1, NaN3, NaN2 */
    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_1, \
             F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
             FSR__, FSR__, FSR__, FSR__
    test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_1, \
             F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
             FSR__, FSR__, FSR__, FSR__
    test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_1, F32_QNAN(3), \
             F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
             FSR__, FSR__, FSR__, FSR__

    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_1, \
             F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
             FSR__, FSR__, FSR__, FSR__
    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_QNAN(3), \
             F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
             FSR__, FSR__, FSR__, FSR__
    test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_QNAN(3), \
             F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
             FSR__, FSR__, FSR__, FSR__

    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_QNAN(3), \
             F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
             FSR__, FSR__, FSR__, FSR__

    /* inf * 0 = default NaN */
    test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_PINF, F32_0, \
             F32_DNAN, F32_DNAN, F32_DNAN, F32_DNAN, \
             FSR_V, FSR_V, FSR_V, FSR_V
    /* inf * 0 + SNaN1 = QNaN1 */
    test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_PINF, F32_0, \
             F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
             FSR_V, FSR_V, FSR_V, FSR_V
    /* inf * 0 + QNaN1 = QNaN1 */
    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_PINF, F32_0, \
             F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
             FSR_V, FSR_V, FSR_V, FSR_V

    /* madd/msub SNaN turns to QNaN and sets Invalid flag */
    test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_1, F32_1, \
             F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
             FSR_V, FSR_V, FSR_V, FSR_V
    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_SNAN(2), F32_1, \
             F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
             FSR_V, FSR_V, FSR_V, FSR_V
test_end
#else
test madd_s_nan_fpu2k
    /* FPU2000 madd/msub NaN1, NaN2, NaN3 priority: NaN2, NaN3, NaN1 */
    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_1, \
             F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
             FSR__, FSR__, FSR__, FSR__
    test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_1, \
             F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
             FSR__, FSR__, FSR__, FSR__
    test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_1, F32_QNAN(3), \
             F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
             FSR__, FSR__, FSR__, FSR__

    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_1, \
             F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
             FSR__, FSR__, FSR__, FSR__
    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_1, F32_QNAN(3), \
             F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), F32_QNAN(3), \
             FSR__, FSR__, FSR__, FSR__
    test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_QNAN(2), F32_QNAN(3), \
             F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
             FSR__, FSR__, FSR__, FSR__

    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_QNAN(2), F32_QNAN(3), \
             F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), F32_QNAN(2), \
             FSR__, FSR__, FSR__, FSR__

    /* inf * 0 = default NaN */
    test_op3 madd.s, f0, f1, f2, f0, F32_1, F32_PINF, F32_0, \
             F32_DNAN, F32_DNAN, F32_DNAN, F32_DNAN, \
             FSR__, FSR__, FSR__, FSR__
    /* inf * 0 + SNaN1 = SNaN1 */
    test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_PINF, F32_0, \
             F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), \
             FSR__, FSR__, FSR__, FSR__
    /* inf * 0 + QNaN1 = QNaN1 */
    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_PINF, F32_0, \
             F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), F32_QNAN(1), \
             FSR__, FSR__, FSR__, FSR__

    /* madd/msub SNaN is preserved */
    test_op3 madd.s, f0, f1, f2, f0, F32_SNAN(1), F32_1, F32_1, \
             F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), F32_SNAN(1), \
             FSR__, FSR__, FSR__, FSR__
    test_op3 madd.s, f0, f1, f2, f0, F32_QNAN(1), F32_SNAN(2), F32_1, \
             F32_SNAN(2), F32_SNAN(2), F32_SNAN(2), F32_SNAN(2), \
             FSR__, FSR__, FSR__, FSR__
test_end
#endif

test msub_s
    /* 1.0 - 1.0000001 * 1.0000001: small negative, inexact. */
    test_op3 msub.s, f0, f1, f2, f0, 0x3f800000, 0x3f800001, 0x3f800001, \
             0xb4800000, 0xb4800000, 0xb4800000, 0xb4800001, \
             FSR_I, FSR_I, FSR_I, FSR_I
test_end

#endif

test_suite_end