+++ /dev/null
-\r
-#include <stdbool.h>\r
-#include <stdint.h>\r
-#include "platform.h"\r
-#include "primitives.h"\r
-#include "internals.h"\r
-#include "specialize.h"\r
-#include "softfloat.h"\r
-\r
/*----------------------------------------------------------------------------
| Common implementation of fused multiply-add for 64-bit floats, operating on
| the raw bit patterns 'uiA', 'uiB', and 'uiC'.  Computes (a*b) + c with a
| single rounding, where 'op' selects a variant: 'softfloat_mulAdd_subC'
| negates the sign of c, and 'softfloat_mulAdd_subProd' negates the sign of
| the product a*b.  Special values (NaNs, infinities, zeros, subnormals) are
| handled per IEEE 754; invalid combinations (inf * 0, inf - inf) raise the
| invalid flag and return the default NaN.
*----------------------------------------------------------------------------*/
float64_t
 softfloat_mulAddF64(
     int op, uint_fast64_t uiA, uint_fast64_t uiB, uint_fast64_t uiC )
{
    bool signA;
    int_fast16_t expA;
    uint_fast64_t sigA;
    bool signB;
    int_fast16_t expB;
    uint_fast64_t sigB;
    bool signC;
    int_fast16_t expC;
    uint_fast64_t sigC;
    bool signProd;
    uint_fast64_t magBits, uiZ;
    struct exp16_sig64 normExpSig;
    int_fast16_t expProd;
    struct uint128 sigProd;
    bool signZ;
    int_fast16_t expZ;
    uint_fast64_t sigZ;
    int_fast16_t expDiff;
    struct uint128 sigC128, sigZ128;
    int shiftCount;
    union ui64_f64 uZ;

    /*------------------------------------------------------------------------
    | Unpack sign, biased exponent, and fraction fields of all three inputs.
    | The 'op' variants are folded directly into the effective signs here.
    *------------------------------------------------------------------------*/
    signA = signF64UI( uiA );
    expA = expF64UI( uiA );
    sigA = fracF64UI( uiA );
    signB = signF64UI( uiB );
    expB = expF64UI( uiB );
    sigB = fracF64UI( uiB );
    signC = signF64UI( uiC ) ^ ( op == softfloat_mulAdd_subC );
    signProd = signA ^ signB ^ ( op == softfloat_mulAdd_subProd );
    expC = expF64UI( uiC );
    sigC = fracF64UI( uiC );
    /*------------------------------------------------------------------------
    | Special cases: exponent 0x7FF marks infinity (zero fraction) or NaN.
    | 'magBits' records whether the other factor is nonzero; a zero value
    | means the product is inf * 0, which is invalid (see 'infProdArg').
    *------------------------------------------------------------------------*/
    if ( expA == 0x7FF ) {
        if ( sigA || ( ( expB == 0x7FF ) && sigB ) ) goto propagateNaN_ABC;
        magBits = expB | sigB;
        goto infProdArg;
    }
    if ( expB == 0x7FF ) {
        if ( sigB ) goto propagateNaN_ABC;
        magBits = expA | sigA;
        goto infProdArg;
    }
    if ( expC == 0x7FF ) {
        if ( sigC ) {
            /* c is NaN; no NaN among a and b (uiZ = 0 contributes none).  */
            uiZ = 0;
            goto propagateNaN_ZC;
        }
        /* c is infinite and the product is finite: result is c.  */
        uiZ = uiC;
        goto uiZ;
    }
    /*------------------------------------------------------------------------
    | Normalize subnormal factors; a zero factor makes the product zero.
    *------------------------------------------------------------------------*/
    if ( ! expA ) {
        if ( ! sigA ) goto zeroProd;
        normExpSig = softfloat_normSubnormalF64Sig( sigA );
        expA = normExpSig.exp;
        sigA = normExpSig.sig;
    }
    if ( ! expB ) {
        if ( ! sigB ) goto zeroProd;
        normExpSig = softfloat_normSubnormalF64Sig( sigB );
        expB = normExpSig.exp;
        sigB = normExpSig.sig;
    }
    /*------------------------------------------------------------------------
    | Form the exact 128-bit product of the two significands.  Each operand
    | (with its implicit leading 1 restored) is shifted left 10, placing it
    | in [2^62, 2^63); the product therefore lies in [2^124, 2^126).  The
    | conditional left shift renormalizes so that sigProd.v64 >= 2^61, giving
    | the high half a fixed weight relative to 'expProd'.
    *------------------------------------------------------------------------*/
    expProd = expA + expB - 0x3FE;
    sigA = ( sigA | UINT64_C( 0x0010000000000000 ) )<<10;
    sigB = ( sigB | UINT64_C( 0x0010000000000000 ) )<<10;
    sigProd = softfloat_mul64To128( sigA, sigB );
    if ( sigProd.v64 < UINT64_C( 0x2000000000000000 ) ) {
        --expProd;
        sigProd = softfloat_shortShift128Left( sigProd.v64, sigProd.v0, 1 );
    }
    signZ = signProd;
    if ( ! expC ) {
        if ( ! sigC ) {
            /* c == 0: the result is just the rounded product.  The low half
               is folded into a sticky ("jam") bit for correct rounding.  */
            expZ = expProd - 1;
            sigZ = sigProd.v64<<1 | ( sigProd.v0 != 0 );
            goto roundPack;
        }
        normExpSig = softfloat_normSubnormalF64Sig( sigC );
        expC = normExpSig.exp;
        sigC = normExpSig.sig;
    }
    /* Shift left 9 (vs. 10 for the factors) so sigC aligns with the
       normalized product high half when expDiff == 0.  */
    sigC = ( sigC | UINT64_C( 0x0010000000000000 ) )<<9;
    expDiff = expProd - expC;
    if ( signProd == signC ) {
        /*--------------------------------------------------------------------
        | Effective addition of magnitudes: align the smaller operand right
        | with jamming, add, then renormalize by at most one bit.
        *--------------------------------------------------------------------*/
        if ( expDiff <= 0 ) {
            expZ = expC;
            if ( expDiff ) {
                sigProd.v64 =
                    softfloat_shift64RightJam( sigProd.v64, - expDiff );
            }
            sigZ = ( sigC + sigProd.v64 ) | ( sigProd.v0 != 0 );
        } else {
            expZ = expProd;
            sigC128 = softfloat_shift128RightJam( sigC, 0, expDiff );
            sigZ128 =
                softfloat_add128(
                    sigProd.v64, sigProd.v0, sigC128.v64, sigC128.v0 );
            sigZ = sigZ128.v64 | ( sigZ128.v0 != 0 );
        }
        if ( sigZ < UINT64_C( 0x4000000000000000 ) ) {
            --expZ;
            sigZ <<= 1;
        }
    } else {
/*** OPTIMIZE BETTER? ***/
        /*--------------------------------------------------------------------
        | Effective subtraction of magnitudes.  The larger-magnitude operand
        | determines the result's sign; massive cancellation is possible, so
        | a full renormalization (count-leading-zeros) follows.
        *--------------------------------------------------------------------*/
        if ( expDiff < 0 ) {
            /* |c| dominates: result takes c's sign and exponent.  */
            signZ = signC;
            expZ = expC;
            sigProd =
                softfloat_shift128RightJam(
                    sigProd.v64, sigProd.v0, - expDiff );
            sigZ128 = softfloat_sub128( sigC, 0, sigProd.v64, sigProd.v0 );
        } else if ( ! expDiff ) {
            /* Same exponent: subtract directly; a negative (two's-complement)
               result means c was larger, so flip the sign and negate.  */
            expZ = expProd;
            sigZ128 = softfloat_sub128( sigProd.v64, sigProd.v0, sigC, 0 );
            if ( ! ( sigZ128.v64 | sigZ128.v0 ) ) goto completeCancellation;
            if ( sigZ128.v64 & UINT64_C( 0x8000000000000000 ) ) {
                signZ ^= 1;
                sigZ128 = softfloat_sub128( 0, 0, sigZ128.v64, sigZ128.v0 );
            }
        } else {
            /* Product dominates.  */
            expZ = expProd;
            sigC128 = softfloat_shift128RightJam( sigC, 0, expDiff );
            sigZ128 =
                softfloat_sub128(
                    sigProd.v64, sigProd.v0, sigC128.v64, sigC128.v0 );
        }
        /*--------------------------------------------------------------------
        | Renormalize after cancellation: skip a fully zero high half, then
        | shift so the leading 1 lands just below the top bit.  A negative
        | 'shiftCount' means a (small) right shift with jamming instead.
        *--------------------------------------------------------------------*/
        if ( ! sigZ128.v64 ) {
            expZ -= 64;
            sigZ128.v64 = sigZ128.v0;
            sigZ128.v0 = 0;
        }
        shiftCount = softfloat_countLeadingZeros64( sigZ128.v64 ) - 1;
        expZ -= shiftCount;
        if ( shiftCount < 0 ) {
            sigZ = softfloat_shortShift64RightJam( sigZ128.v64, - shiftCount );
        } else {
            sigZ128 =
                softfloat_shortShift128Left(
                    sigZ128.v64, sigZ128.v0, shiftCount );
            sigZ = sigZ128.v64;
        }
        sigZ |= ( sigZ128.v0 != 0 );
    }
 roundPack:
    return softfloat_roundPackToF64( signZ, expZ, sigZ );
 propagateNaN_ABC:
    uiZ = softfloat_propagateNaNF64UI( uiA, uiB );
    goto propagateNaN_ZC;
 infProdArg:
    if ( magBits ) {
        /* Product is a true infinity.  inf + (-inf) falls through to
           'invalid'; inf + NaN propagates the NaN; otherwise return inf.  */
        uiZ = packToF64UI( signProd, 0x7FF, 0 );
        if ( expC != 0x7FF ) goto uiZ;
        if ( sigC ) goto propagateNaN_ZC;
        if ( signProd == signC ) goto uiZ;
    }
 invalid:
    softfloat_raiseFlags( softfloat_flag_invalid );
    uiZ = defaultNaNF64UI;
 propagateNaN_ZC:
    uiZ = softfloat_propagateNaNF64UI( uiZ, uiC );
    goto uiZ;
 zeroProd:
    /* a*b == 0 exactly: result is c, except 0 + (-0) (opposite signs, c
       zero), whose sign depends on the rounding mode: -0 only for
       round-toward-minus-infinity, +0 otherwise.  */
    uiZ = uiC;
    if ( ! ( expC | sigC ) && ( signProd != signC ) ) {
 completeCancellation:
        uiZ =
            packToF64UI( softfloat_roundingMode == softfloat_round_min, 0, 0 );
    }
 uiZ:
    uZ.ui = uiZ;
    return uZ.f;

}
-\r