// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: utmath - Integer math support routines
 *
 ******************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utmath")

/* Structures used only for 64-bit divide */
typedef struct uint64_struct {
	u32 lo;
	u32 hi;

} uint64_struct;

typedef union uint64_overlay {
	u64 full;
	struct uint64_struct part;

} uint64_overlay;

/*
 * Optional support for 64-bit double-precision integer multiply and shift.
 * This code is configurable and is implemented in order to support 32-bit
 * kernel environments where a 64-bit double-precision math library is not
 * available.
 */
#ifndef ACPI_USE_NATIVE_MATH64

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_multiply
 *
 * PARAMETERS:  multiplicand        - 64-bit multiplicand
 *              multiplier          - 32-bit multiplier
 *              out_product         - Pointer to where the product is returned
 *
 * DESCRIPTION: Perform a short multiply.
 *
 ******************************************************************************/

acpi_status
acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
{
	union uint64_overlay multiplicand_ovl;
	union uint64_overlay product;
	u32 carry32;

	ACPI_FUNCTION_TRACE(ut_short_multiply);

	multiplicand_ovl.full = multiplicand;

	/*
	 * The Product is 64 bits, the carry is always 32 bits,
	 * and is generated by the second multiply.
	 */
	ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.hi, multiplier,
			  product.part.hi, carry32);

	ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.lo, multiplier,
			  product.part.lo, carry32);

	product.part.hi += carry32;

	/* Return only what was requested */

	if (out_product) {
		*out_product = product.full;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_shift_left
 *
 * PARAMETERS:  operand             - 64-bit shift operand
 *              count               - 32-bit shift count
 *              out_result          - Pointer to where the result is returned
 *
 * DESCRIPTION: Perform a short left shift.
 *
 ******************************************************************************/

acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
{
	union uint64_overlay operand_ovl;

	ACPI_FUNCTION_TRACE(ut_short_shift_left);

	operand_ovl.full = operand;

	if ((count & 63) >= 32) {
		operand_ovl.part.hi = operand_ovl.part.lo;
		operand_ovl.part.lo = 0;
		count = (count & 63) - 32;
	}
	ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
				 operand_ovl.part.lo, count);

	/* Return only what was requested */

	if (out_result) {
		*out_result = operand_ovl.full;
	}

	return_ACPI_STATUS(AE_OK);
}
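
/*
 * Worked example (illustrative only, not part of the ACPICA sources) of how
 * acpi_ut_short_shift_left above, and acpi_ut_short_shift_right below, handle
 * shift counts of 32 or more using only 32-bit operations. The values are
 * hypothetical and assume the u32/u64 halves defined in uint64_overlay:
 *
 *	operand = 0x00000000FFFFFFFF	(lo = 0xFFFFFFFF, hi = 0)
 *	count   = 40			(40 & 63 = 40, which is >= 32)
 *
 *	Step 1: fold the "shift by 32" into a word move:
 *	        hi = lo = 0xFFFFFFFF, lo = 0, remaining count = 40 - 32 = 8
 *	Step 2: shift the remaining 8 bits across the hi:lo pair:
 *	        hi = 0xFFFFFF00, lo = 0
 *
 * Result: 0xFFFFFF0000000000, the same value a native "operand << 40" yields.
 */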

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_shift_right
 *
 * PARAMETERS:  operand             - 64-bit shift operand
 *              count               - 32-bit shift count
 *              out_result          - Pointer to where the result is returned
 *
 * DESCRIPTION: Perform a short right shift.
 *
 ******************************************************************************/

acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
{
	union uint64_overlay operand_ovl;

	ACPI_FUNCTION_TRACE(ut_short_shift_right);

	operand_ovl.full = operand;

	if ((count & 63) >= 32) {
		operand_ovl.part.lo = operand_ovl.part.hi;
		operand_ovl.part.hi = 0;
		count = (count & 63) - 32;
	}
	ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
				  operand_ovl.part.lo, count);

	/* Return only what was requested */

	if (out_result) {
		*out_result = operand_ovl.full;
	}

	return_ACPI_STATUS(AE_OK);
}
#else

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_multiply
 *
 * PARAMETERS:  See function headers above
 *
 * DESCRIPTION: Native version of the ut_short_multiply function.
 *
 ******************************************************************************/

acpi_status
acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
{

	ACPI_FUNCTION_TRACE(ut_short_multiply);

	/* Return only what was requested */

	if (out_product) {
		*out_product = multiplicand * multiplier;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_shift_left
 *
 * PARAMETERS:  See function headers above
 *
 * DESCRIPTION: Native version of the ut_short_shift_left function.
 *
 ******************************************************************************/

acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
{

	ACPI_FUNCTION_TRACE(ut_short_shift_left);

	/* Return only what was requested */

	if (out_result) {
		*out_result = operand << count;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_shift_right
 *
 * PARAMETERS:  See function headers above
 *
 * DESCRIPTION: Native version of the ut_short_shift_right function.
 *
 ******************************************************************************/

acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
{

	ACPI_FUNCTION_TRACE(ut_short_shift_right);

	/* Return only what was requested */

	if (out_result) {
		*out_result = operand >> count;
	}

	return_ACPI_STATUS(AE_OK);
}
#endif
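
/*
 * Usage sketch (illustrative only, not part of the ACPICA sources): the
 * multiply and shift helpers above share one calling convention whether the
 * emulated or native variants are compiled in, and a NULL output pointer may
 * be passed when the result is not needed. A hypothetical caller might be:
 *
 *	u64 product;
 *	acpi_status status;
 *
 *	status = acpi_ut_short_multiply(nanoseconds, 10, &product);
 *	if (ACPI_FAILURE(status)) {
 *		return_ACPI_STATUS(status);
 *	}
 *
 * The variable names (nanoseconds, product) are placeholders for this sketch.
 */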

/*
 * Optional support for 64-bit double-precision integer divide. This code
 * is configurable and is implemented in order to support 32-bit kernel
 * environments where a 64-bit double-precision math library is not available.
 *
 * Support for a more normal 64-bit divide/modulo (with check for a
 * divide-by-zero) appears after this optional section of code.
 */
#ifndef ACPI_USE_NATIVE_DIVIDE

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_divide
 *
 * PARAMETERS:  dividend            - 64-bit dividend
 *              divisor             - 32-bit divisor
 *              out_quotient        - Pointer to where the quotient is returned
 *              out_remainder       - Pointer to where the remainder is returned
 *
 * RETURN:      Status (Checks for divide-by-zero)
 *
 * DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits)
 *              divide and modulo. The result is a 64-bit quotient and a
 *              32-bit remainder.
 *
 ******************************************************************************/

acpi_status
acpi_ut_short_divide(u64 dividend,
		     u32 divisor, u64 *out_quotient, u32 *out_remainder)
{
	union uint64_overlay dividend_ovl;
	union uint64_overlay quotient;
	u32 remainder32;

	ACPI_FUNCTION_TRACE(ut_short_divide);

	/* Always check for a zero divisor */

	if (divisor == 0) {
		ACPI_ERROR((AE_INFO, "Divide by zero"));
		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
	}

	dividend_ovl.full = dividend;

	/*
	 * The quotient is 64 bits, the remainder is always 32 bits,
	 * and is generated by the second divide.
	 */
	ACPI_DIV_64_BY_32(0, dividend_ovl.part.hi, divisor,
			  quotient.part.hi, remainder32);

	ACPI_DIV_64_BY_32(remainder32, dividend_ovl.part.lo, divisor,
			  quotient.part.lo, remainder32);

	/* Return only what was requested */

	if (out_quotient) {
		*out_quotient = quotient.full;
	}
	if (out_remainder) {
		*out_remainder = remainder32;
	}

	return_ACPI_STATUS(AE_OK);
}
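
/*
 * Worked example (illustrative only, not part of the ACPICA sources) of the
 * two-step division performed by acpi_ut_short_divide above, using a small
 * hypothetical dividend so the arithmetic stays readable. Dividing the
 * 64-bit value 0x0000000500000003 (5 * 2^32 + 3) by 4:
 *
 *	Step 1: divide the high word:   5 / 4 = 1, remainder 1
 *	        -> quotient.hi = 1
 *	Step 2: divide (remainder:lo):  0x100000003 / 4 = 0x40000000,
 *	        remainder 3
 *	        -> quotient.lo = 0x40000000, final 32-bit remainder = 3
 *
 * Giving quotient 0x0000000140000000 and remainder 3, matching the result of
 * a native 64-bit divide of the same operands.
 */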

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_divide
 *
 * PARAMETERS:  in_dividend         - Dividend
 *              in_divisor          - Divisor
 *              out_quotient        - Pointer to where the quotient is returned
 *              out_remainder       - Pointer to where the remainder is returned
 *
 * RETURN:      Status (Checks for divide-by-zero)
 *
 * DESCRIPTION: Perform a divide and modulo.
 *
 ******************************************************************************/

acpi_status
acpi_ut_divide(u64 in_dividend,
	       u64 in_divisor, u64 *out_quotient, u64 *out_remainder)
{
	union uint64_overlay dividend;
	union uint64_overlay divisor;
	union uint64_overlay quotient;
	union uint64_overlay remainder;
	union uint64_overlay normalized_dividend;
	union uint64_overlay normalized_divisor;
	u32 partial1;
	union uint64_overlay partial2;
	union uint64_overlay partial3;

	ACPI_FUNCTION_TRACE(ut_divide);

	/* Always check for a zero divisor */

	if (in_divisor == 0) {
		ACPI_ERROR((AE_INFO, "Divide by zero"));
		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
	}

	divisor.full = in_divisor;
	dividend.full = in_dividend;
	if (divisor.part.hi == 0) {
		/*
		 * 1) Simplest case is where the divisor is 32 bits, we can
		 * just do two divides
		 */
		remainder.part.hi = 0;

		/*
		 * The quotient is 64 bits, the remainder is always 32 bits,
		 * and is generated by the second divide.
		 */
		ACPI_DIV_64_BY_32(0, dividend.part.hi, divisor.part.lo,
				  quotient.part.hi, partial1);

		ACPI_DIV_64_BY_32(partial1, dividend.part.lo, divisor.part.lo,
				  quotient.part.lo, remainder.part.lo);
	}

	else {
		/*
		 * 2) The general case where the divisor is a full 64 bits
		 * is more difficult
		 */
		quotient.part.hi = 0;
		normalized_dividend = dividend;
		normalized_divisor = divisor;

		/* Normalize the operands (shift until the divisor is < 32 bits) */

		do {
			ACPI_SHIFT_RIGHT_64(normalized_divisor.part.hi,
					    normalized_divisor.part.lo);
			ACPI_SHIFT_RIGHT_64(normalized_dividend.part.hi,
					    normalized_dividend.part.lo);

		} while (normalized_divisor.part.hi != 0);

		/* Partial divide */

		ACPI_DIV_64_BY_32(normalized_dividend.part.hi,
				  normalized_dividend.part.lo,
				  normalized_divisor.part.lo, quotient.part.lo,
				  partial1);

		/*
		 * The quotient is always 32 bits, and simply requires
		 * adjustment. The 64-bit remainder must be generated.
		 */
		partial1 = quotient.part.lo * divisor.part.hi;
		partial2.full = (u64) quotient.part.lo * divisor.part.lo;
		partial3.full = (u64) partial2.part.hi + partial1;

		remainder.part.hi = partial3.part.lo;
		remainder.part.lo = partial2.part.lo;

		if (partial3.part.hi == 0) {
			if (partial3.part.lo >= dividend.part.hi) {
				if (partial3.part.lo == dividend.part.hi) {
					if (partial2.part.lo > dividend.part.lo) {
						quotient.part.lo--;
						remainder.full -= divisor.full;
					}
				} else {
					quotient.part.lo--;
					remainder.full -= divisor.full;
				}
			}

			remainder.full = remainder.full - dividend.full;
			remainder.part.hi = (u32)-((s32)remainder.part.hi);
			remainder.part.lo = (u32)-((s32)remainder.part.lo);

			if (remainder.part.lo) {
				remainder.part.hi--;
			}
		}
	}

	/* Return only what was requested */

	if (out_quotient) {
		*out_quotient = quotient.full;
	}
	if (out_remainder) {
		*out_remainder = remainder.full;
	}

	return_ACPI_STATUS(AE_OK);
}
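
/*
 * Worked example (illustrative only, not part of the ACPICA sources) of the
 * general 64-by-64 case handled above when the divisor does not fit in
 * 32 bits. The values are hypothetical and chosen to keep the arithmetic
 * readable:
 *
 *	dividend = 0x0000000B00000005	(11 * 2^32 + 5)
 *	divisor  = 0x0000000200000000	(2^33)
 *
 *	Normalize: shift both operands right until divisor.hi == 0
 *	(two shifts here), giving normalized divisor 0x80000000 and
 *	normalized dividend 0x00000002C0000001.
 *
 *	Estimate:  0x2C0000001 / 0x80000000 = 5 (the 32-bit quotient estimate)
 *
 *	Correct:   5 * divisor = 0x0000000A00000000, which does not exceed the
 *	dividend, so the estimate stands (the code above decrements it by at
 *	most one when the product overshoots), and the remainder works out to
 *	dividend - 5 * divisor = 0x0000000100000005.
 */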

#else

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_divide, acpi_ut_divide
 *
 * PARAMETERS:  See function headers above
 *
 * DESCRIPTION: Native versions of the ut_divide functions. Use these if either
 *              1) The target is a 64-bit platform and therefore 64-bit
 *                 integer math is supported directly by the machine.
 *              2) The target is a 32-bit or 16-bit platform, and the
 *                 double-precision integer math library is available to
 *                 perform the divide.
 *
 ******************************************************************************/

acpi_status
acpi_ut_short_divide(u64 in_dividend,
		     u32 divisor, u64 *out_quotient, u32 *out_remainder)
{

	ACPI_FUNCTION_TRACE(ut_short_divide);

	/* Always check for a zero divisor */

	if (divisor == 0) {
		ACPI_ERROR((AE_INFO, "Divide by zero"));
		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
	}

	/* Return only what was requested */

	if (out_quotient) {
		*out_quotient = in_dividend / divisor;
	}
	if (out_remainder) {
		*out_remainder = (u32) (in_dividend % divisor);
	}

	return_ACPI_STATUS(AE_OK);
}

acpi_status
acpi_ut_divide(u64 in_dividend,
	       u64 in_divisor, u64 *out_quotient, u64 *out_remainder)
{
	ACPI_FUNCTION_TRACE(ut_divide);

	/* Always check for a zero divisor */

	if (in_divisor == 0) {
		ACPI_ERROR((AE_INFO, "Divide by zero"));
		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
	}

	/* Return only what was requested */

	if (out_quotient) {
		*out_quotient = in_dividend / in_divisor;
	}
	if (out_remainder) {
		*out_remainder = in_dividend % in_divisor;
	}

	return_ACPI_STATUS(AE_OK);
}

#endif
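
/*
 * Usage sketch (illustrative only, not part of the ACPICA sources): both
 * divide entry points return the quotient and remainder through optional
 * output pointers, so a caller that needs only one of them can pass NULL for
 * the other. A hypothetical caller wanting both might look like:
 *
 *	u64 quotient;
 *	u64 remainder;
 *	acpi_status status;
 *
 *	status = acpi_ut_divide(length, block_size, &quotient, &remainder);
 *	if (ACPI_FAILURE(status)) {
 *		return_ACPI_STATUS(status);	// e.g. AE_AML_DIVIDE_BY_ZERO
 *	}
 *
 * The variable names (length, block_size) are placeholders for this sketch.
 */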