From fc98e575192e6a07e0cb56118d8800ee7c98c6a0 Mon Sep 17 00:00:00 2001 From: Aina Niemetz Date: Mon, 3 May 2021 10:48:06 -0700 Subject: [PATCH] SymFPU: Automatically apply patch from 2020-11-14. (#6471) This automatically applies @martin-cs's working patch from 2020-11-14. It fixes several issues, all covered open issues are added as regression tests. Fixes #3582. Fixes #5511. Fixes #6164. --- .github/workflows/ci.yml | 1 - cmake/FindSymFPU.cmake | 2 + cmake/deps-utils/SymFPU-patch-20201114.patch | 348 +++++++++++++++++++ test/regress/CMakeLists.txt | 4 + test/regress/regress0/fp/from_ubv.smt2 | 5 + test/regress/regress0/fp/issue3582.smt2 | 6 + test/regress/regress0/fp/issue5511.smt2 | 5 + test/regress/regress0/fp/issue6164.smt2 | 6 + 8 files changed, 376 insertions(+), 1 deletion(-) create mode 100644 cmake/deps-utils/SymFPU-patch-20201114.patch create mode 100644 test/regress/regress0/fp/from_ubv.smt2 create mode 100644 test/regress/regress0/fp/issue3582.smt2 create mode 100644 test/regress/regress0/fp/issue5511.smt2 create mode 100644 test/regress/regress0/fp/issue6164.smt2 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d216d07fe..b53370f60 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -168,7 +168,6 @@ jobs: with: path: build/deps key: deps-${{ runner.os }}-${{ matrix.cache-key }}-${{ hashFiles('cmake/Find**', 'cmake/deps-helper.cmake') }}-${{ hashFiles('.github/workflows/ci.yml') }} - restore-keys: deps-${{ runner.os }}-${{ matrix.cache-key }}- - name: Configure run: | diff --git a/cmake/FindSymFPU.cmake b/cmake/FindSymFPU.cmake index 5a2ac5111..2f779abc0 100644 --- a/cmake/FindSymFPU.cmake +++ b/cmake/FindSymFPU.cmake @@ -39,6 +39,8 @@ if(NOT SymFPU_FOUND_SYSTEM) ${COMMON_EP_CONFIG} URL https://github.com/martin-cs/symfpu/archive/${SymFPU_COMMIT}.tar.gz URL_HASH SHA1=9e00045130b93e3c2a46ce73a1b5b6451340dc46 + PATCH_COMMAND patch -p1 -d + -i ${CMAKE_CURRENT_LIST_DIR}/deps-utils/SymFPU-patch-20201114.patch CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND ${CMAKE_COMMAND} -E copy_directory /core diff --git a/cmake/deps-utils/SymFPU-patch-20201114.patch b/cmake/deps-utils/SymFPU-patch-20201114.patch new file mode 100644 index 000000000..a3942bc70 --- /dev/null +++ b/cmake/deps-utils/SymFPU-patch-20201114.patch @@ -0,0 +1,348 @@ +# Copyright (C) 2020 Martin Brain. + +diff --git a/core/add.h b/core/add.h +index 44ee8ae..028ccae 100644 +--- a/core/add.h ++++ b/core/add.h +@@ -396,15 +396,14 @@ template + ubv::zero(alignedSum.getWidth()), + (shiftedStickyBit | ITE(!overflow, ubv::zero(1), sum.extract(0,0)).extend(alignedSum.getWidth() - 1)))); + +- +- // Put it back together +- unpackedFloat sumResult(resultSign, correctedExponent, (alignedSum | stickyBit).contract(1)); +- + // We return something in an extended format + // *. One extra exponent bit to deal with the 'overflow' case + // *. Two extra significand bits for the guard and sticky bits + fpt extendedFormat(format.exponentWidth() + 1, format.significandWidth() + 2); + ++ // Put it back together ++ unpackedFloat sumResult(extendedFormat, resultSign, correctedExponent, (alignedSum | stickyBit).contract(1)); ++ + // Deal with the major cancellation case + // It would be nice to use normaliseUpDetectZero but the sign + // of the zero depends on the rounding mode. 
+@@ -524,7 +523,7 @@ template + extendedLargerExponent.decrement()))); + + // Far path : Construct result +- unpackedFloat farPathResult(resultSign, correctedExponent, (alignedSum | shiftedStickyBit).contract(1)); ++ unpackedFloat farPathResult(extendedFormat, resultSign, correctedExponent, (alignedSum | shiftedStickyBit).contract(1)); + + + +@@ -542,13 +541,14 @@ template + prop nearNoCancel(nearSum.extract(sumWidth - 2, sumWidth - 2).isAllOnes()); + + ubv choppedNearSum(nearSum.extract(sumWidth - 3,1)); // In the case this is used, cut bits are all 0 +- unpackedFloat cancellation(resultSign, ++ unpackedFloat cancellation(extendedFormat, ++ resultSign, + larger.getExponent().decrement(), + choppedNearSum); + + + // Near path : Construct result +- unpackedFloat nearPathResult(resultSign, extendedLargerExponent, nearSum.contract(1)); ++ unpackedFloat nearPathResult(extendedFormat, resultSign, extendedLargerExponent, nearSum.contract(1)); + + + +diff --git a/core/convert.h b/core/convert.h +index 27b1f58..372f3a5 100644 +--- a/core/convert.h ++++ b/core/convert.h +@@ -94,14 +94,20 @@ unpackedFloat roundToIntegral (const typename t::fpt &format, + + // Otherwise, compute rounding location + sbv initialRoundingPoint(expandingSubtract(packedSigWidth,exponent)); // Expansion only needed in obscure formats +- sbv roundingPoint(collar(initialRoundingPoint, +- sbv::zero(exponentWidth + 1), +- unpackedSigWidth.extend(1).increment())); ++ sbv collaredRoundingPoint(collar(initialRoundingPoint, ++ sbv::zero(exponentWidth + 1), ++ unpackedSigWidth.extend(1).increment())); + + // Round + ubv significand(input.getSignificand()); ++ bwt significandWidth(significand.getWidth()); ++ ubv roundingPoint((significandWidth >= exponentWidth) ? ++ collaredRoundingPoint.toUnsigned().matchWidth(significand) : ++ collaredRoundingPoint.toUnsigned().extract(significandWidth - 1, 0)); ++ // Extract is safe because of the collar ++ + significandRounderResult roundedResult(variablePositionRound(roundingMode, input.getSign(), significand, +- roundingPoint.toUnsigned().matchWidth(significand), ++ roundingPoint, + prop(false), // TODO : Could actually be exponent >= 0 + isID)); // The fast-path case so just deactives some code + +@@ -168,14 +174,18 @@ unpackedFloat roundToIntegral (const typename t::fpt &format, + template + unpackedFloat convertUBVToFloat (const typename t::fpt &targetFormat, + const typename t::rm &roundingMode, +- const typename t::ubv &input, ++ const typename t::ubv &preInput, + const typename t::bwt &decimalPointPosition = 0) { + + typedef typename t::bwt bwt; + typedef typename t::prop prop; ++ typedef typename t::ubv ubv; + typedef typename t::sbv sbv; + typedef typename t::fpt fpt; + ++ // In the case of a 1 bit input(?) extend to 2 bits so that the intermediate float is a sensible format ++ ubv input((preInput.getWidth() == 1) ? preInput.extend(1) : preInput); ++ + bwt inputWidth(input.getWidth()); + + PRECONDITION(decimalPointPosition <= inputWidth); +@@ -208,6 +218,7 @@ template + + bwt inputWidth(input.getWidth()); + ++ PRECONDITION(inputWidth > 1); // A 1 bit signed-number is ??? 
+ PRECONDITION(decimalPointPosition <= inputWidth); + + // Devise an appropriate format +diff --git a/core/divide.h b/core/divide.h +index 9cfcebc..90f126a 100644 +--- a/core/divide.h ++++ b/core/divide.h +@@ -105,7 +105,8 @@ template + ubv finishedSignificand(alignedSignificand | ubv(divided.remainderBit).extend(resWidth - 1)); + + // Put back together +- unpackedFloat divideResult(divideSign, alignedExponent.extend(1), finishedSignificand); ++ fpt extendedFormat(format.exponentWidth() + 1, format.significandWidth() + 2); ++ unpackedFloat divideResult(extendedFormat, divideSign, alignedExponent, finishedSignificand); + + // A brief word about formats. + // You might think that the extend above is unnecessary : it is from a overflow point of view. +@@ -115,7 +116,6 @@ template + // can have an exponent greater than very large normal * 2 ( + 1) + // because the exponent range is asymmetric with more subnormal than normal. + +- fpt extendedFormat(format.exponentWidth() + 2, format.significandWidth() + 2); + POSTCONDITION(divideResult.valid(extendedFormat)); + + return divideResult; +diff --git a/core/multiply.h b/core/multiply.h +index f464de1..7aeb463 100644 +--- a/core/multiply.h ++++ b/core/multiply.h +@@ -97,10 +97,9 @@ template + + + // Put back together +- unpackedFloat multiplyResult(multiplySign, alignedExponent, alignedSignificand); +- +- + fpt extendedFormat(format.exponentWidth() + 1, format.significandWidth() * 2); ++ unpackedFloat multiplyResult(extendedFormat, multiplySign, alignedExponent, alignedSignificand); ++ + POSTCONDITION(multiplyResult.valid(extendedFormat)); + + return multiplyResult; +diff --git a/core/operations.h b/core/operations.h +index 84b0263..15ead9e 100644 +--- a/core/operations.h ++++ b/core/operations.h +@@ -24,6 +24,24 @@ + + namespace symfpu { + ++ /*** Size matching ***/ ++ template ++ std::pair sameWidth(const bv &op1, const bv &op2) { ++ typename t::bwt w1(op1.getWidth()); ++ typename t::bwt w2(op2.getWidth()); ++ ++ if (w1 == w2) { ++ return std::make_pair(op1, op2); ++ } else if (w1 < w2) { ++ return std::make_pair(op1.matchWidth(op2), op2); ++ } else { ++ INVARIANT(w1 > w2); ++ return std::make_pair(op1.matchWidth(op2), op2); ++ } ++ } ++ ++ ++ + /*** Expanding operations ***/ + template + bv expandingAdd (const bv &op1, const bv &op2) { +diff --git a/core/packing.h b/core/packing.h +index b93d884..65a97c4 100644 +--- a/core/packing.h ++++ b/core/packing.h +@@ -102,7 +102,7 @@ namespace symfpu { + ubv packedSign(uf.getSign()); + + // Exponent +- bwt packedExWidth = format.packedExponentWidth(); ++ bwt packedExWidth(format.packedExponentWidth()); + + prop inNormalRange(uf.inNormalRange(format, prop(true))); + INVARIANT(inNormalRange || uf.inSubnormalRange(format, prop(true))); // Default values ensure this. +@@ -131,10 +131,18 @@ namespace symfpu { + // Significand + bwt packedSigWidth = format.packedSignificandWidth(); + ubv unpackedSignificand(uf.getSignificand()); ++ bwt unpackedSignificandWidth = unpackedSignificand.getWidth(); + +- INVARIANT(packedSigWidth == unpackedSignificand.getWidth() - 1); ++ INVARIANT(packedSigWidth == unpackedSignificandWidth - 1); + ubv dropLeadingOne(unpackedSignificand.extract(packedSigWidth - 1,0)); +- ubv correctedSubnormal((unpackedSignificand >> (uf.getSubnormalAmount(format).toUnsigned().matchWidth(unpackedSignificand))).extract(packedSigWidth - 1,0)); ++ ++ ubv subnormalShiftAmount(uf.getSubnormalAmount(format).toUnsigned()); ++ ubv shiftAmount((subnormalShiftAmount.getWidth() <= unpackedSignificandWidth) ? 
++ subnormalShiftAmount.matchWidth(unpackedSignificand) : ++ subnormalShiftAmount.extract(unpackedSignificandWidth - 1, 0)); ++ // The extraction could loose data if exponent is much larger than the significand ++ // However getSubnormalAmount is between 0 and packedSigWidth so it should be safe ++ ubv correctedSubnormal((unpackedSignificand >> shiftAmount).extract(packedSigWidth - 1,0)); + + prop hasFixedSignificand(uf.getNaN() || uf.getInf() || uf.getZero()); + +diff --git a/core/rounder.h b/core/rounder.h +index 3eb4796..723f5be 100644 +--- a/core/rounder.h ++++ b/core/rounder.h +@@ -381,7 +381,14 @@ template + // Note that this is negative if normal, giving a full subnormal mask + // but the result will be ignored (see the next invariant) + +- ubv subnormalShiftPrepared(subnormalAmount.toUnsigned().matchWidth(extractedSignificand)); ++ ++ // Care is needed if the exponents are longer than the significands ++ // In the case when data is lost it is negative and not used ++ bwt extractedSignificandWidth(extractedSignificand.getWidth()); ++ ubv subnormalShiftPrepared((extractedSignificandWidth >= expWidth + 1) ? ++ subnormalAmount.toUnsigned().matchWidth(extractedSignificand) : ++ subnormalAmount.toUnsigned().extract(extractedSignificandWidth - 1, 0)); ++ + + // Compute masks + ubv subnormalMask(orderEncode(subnormalShiftPrepared)); // Invariant implies this if all ones, it will not be used +@@ -569,8 +576,11 @@ unpackedFloat originalRounder (const typename t::fpt &format, + prop aboveLimit(subnormalAmount >= sbv(expWidth, targetSignificandWidth)); // Will underflow + sbv subnormalShift(ITE((belowLimit || aboveLimit), sbv::zero(expWidth), subnormalAmount)); + // Optimisation : collar +- +- ubv subnormalShiftPrepared(subnormalShift.toUnsigned().extend(targetSignificandWidth - expWidth)); ++ ++ bwt extractedSignificandWidth(extractedSignificand.getWidth()); ++ ubv subnormalShiftPrepared((extractedSignificandWidth >= expWidth + 1) ? ++ subnormalAmount.toUnsigned().extend(targetSignificandWidth - expWidth) : ++ subnormalAmount.toUnsigned().extract(extractedSignificandWidth - 1, 0)); + ubv guardLocation(ubv::one(targetSignificandWidth) << subnormalShiftPrepared); + ubv stickyMask(guardLocation.decrement()); + +diff --git a/core/sqrt.h b/core/sqrt.h +index b7a1f65..d66206c 100644 +--- a/core/sqrt.h ++++ b/core/sqrt.h +@@ -91,12 +91,11 @@ template + + ubv finishedSignificand(sqrtd.result.append(ubv(sqrtd.remainderBit))); + +- unpackedFloat sqrtResult(sqrtSign, exponentHalved, finishedSignificand); +- +- + fpt extendedFormat(format.exponentWidth(), format.significandWidth() + 2); + // format.exponentWidth() - 1 should also be true but requires shrinking the exponent and + // then increasing it in the rounder ++ unpackedFloat sqrtResult(extendedFormat, sqrtSign, exponentHalved, finishedSignificand); ++ + POSTCONDITION(sqrtResult.valid(extendedFormat)); + + return sqrtResult; +diff --git a/core/unpackedFloat.h b/core/unpackedFloat.h +index 691b346..9e1be93 100644 +--- a/core/unpackedFloat.h ++++ b/core/unpackedFloat.h +@@ -102,6 +102,17 @@ namespace symfpu { + sign(s), exponent(exp), significand(signif) + {} + ++ // An intermediate point in some operations is producing a value in an extended ++ // format (for example, multiply produces an intermediate value with one extra exponent ++ // bit and double the number of significand bits). However the unpacked size of the ++ // significand is larger than the bits given by the format. 
This constructor does ++ // the appropriate extension so that what is constructed is a valid unpacked float ++ // in the given format. ++ unpackedFloat (const fpt &fmt, const prop &s, const sbv &exp, const ubv &signif) : ++ nan(false), inf(false), zero(false), ++ sign(s), exponent(exp.matchWidth(defaultExponent(fmt))), significand(signif) ++ {} ++ + unpackedFloat (const unpackedFloat &old) : + nan(old.nan), inf(old.inf), zero(old.zero), + sign(old.sign), exponent(old.exponent), significand(old.significand) +@@ -155,6 +166,8 @@ namespace symfpu { + + static bwt exponentWidth(const fpt &format) { + ++ // This calculation is a little more complex than you might think... ++ + // Note that there is one more exponent above 0 than there is + // below. This is the opposite of 2's compliment but this is not + // a problem because the highest packed exponent corresponds to +@@ -163,17 +176,33 @@ namespace symfpu { + // However we do need to increase it to allow subnormals (packed) + // to be normalised. + +- bwt width = format.exponentWidth(); ++ // The smallest exponent is: ++ // -2^(format.exponentWidth() - 1) - 2 - (format.significandWidth() - 1) ++ // ++ // We need an unpacked exponent width u such that ++ // -2^(u-1) <= -2^(format.exponentWidth() - 1) - 2 - (format.significandWidth() - 1) ++ // i.e. ++ // 2^(u-1) >= 2^(format.exponentWidth() - 1) + (format.significandWidth() - 3) ++ ++ bwt formatExponentWidth = format.exponentWidth(); ++ bwt formatSignificandWidth = format.significandWidth(); ++ ++ if (formatSignificandWidth <= 3) { ++ // Subnormals fit into the gap between minimum normal exponent and what is represenatble ++ // using a signed number ++ return formatExponentWidth; ++ } + +- // Could be improved to remove overflow concerns +- uint64_t minimumExponent = ((1 << (width - 1)) - 2) + (format.significandWidth() - 1); ++ bwt bitsNeededForSubnormals = bitsToRepresent(format.significandWidth() - 3); ++ if (bitsNeededForSubnormals < formatExponentWidth - 1) { ++ // Significand is short compared to exponent range, ++ // one extra bit should be sufficient ++ return formatExponentWidth + 1; + +- // Increase width until even the smallest subnormal can be normalised +- while ((1UL << (width - 1)) < minimumExponent) { +- ++width; ++ } else { ++ // Significand is long compared to exponent range ++ return bitsToRepresent((bwt(1) << (formatExponentWidth - 1)) + formatSignificandWidth - 3) + 1; + } +- +- return width; + } + + static bwt significandWidth(const fpt &format) { +@@ -409,7 +438,10 @@ namespace symfpu { + (subnormalAmount <= sbv(exWidth,sigWidth))); + + // Invariant implies this following steps do not loose data +- ubv mask(orderEncode(subnormalAmount.toUnsigned().matchWidth(significand))); ++ ubv mask(orderEncode((sigWidth >= exWidth) ? 
++ subnormalAmount.toUnsigned().matchWidth(significand) : ++ subnormalAmount.toUnsigned().extract(sigWidth - 1, 0) ++ )); + + prop correctlyAbbreviated((mask & significand).isAllZeros()); + diff --git a/test/regress/CMakeLists.txt b/test/regress/CMakeLists.txt index 8773c3583..765b12e90 100644 --- a/test/regress/CMakeLists.txt +++ b/test/regress/CMakeLists.txt @@ -573,10 +573,14 @@ set(regress_0_tests regress0/fp/abs-unsound2.smt2 regress0/fp/down-cast-RNA.smt2 regress0/fp/ext-rew-test.smt2 + regress0/fp/from_ubv.smt2 regress0/fp/issue-5524.smt2 regress0/fp/issue3536.smt2 + regress0/fp/issue3582.smt2 regress0/fp/issue3619.smt2 regress0/fp/issue4277-assign-func.smt2 + regress0/fp/issue5511.smt2 + regress0/fp/issue6164.smt2 regress0/fp/rti_3_5_bug.smt2 regress0/fp/simple.smt2 regress0/fp/wrong-model.smt2 diff --git a/test/regress/regress0/fp/from_ubv.smt2 b/test/regress/regress0/fp/from_ubv.smt2 new file mode 100644 index 000000000..6939e478a --- /dev/null +++ b/test/regress/regress0/fp/from_ubv.smt2 @@ -0,0 +1,5 @@ +; EXPECT: unsat +(set-logic QF_FP) +(declare-const r RoundingMode) +(assert (distinct ((_ to_fp_unsigned 8 24) r (_ bv0 1)) (fp (_ bv0 1) (_ bv0 8) (_ bv0 23)))) +(check-sat) diff --git a/test/regress/regress0/fp/issue3582.smt2 b/test/regress/regress0/fp/issue3582.smt2 new file mode 100644 index 000000000..c06cdb110 --- /dev/null +++ b/test/regress/regress0/fp/issue3582.smt2 @@ -0,0 +1,6 @@ +; EXPECT: unsat +(set-logic QF_FP) +(declare-fun bv () (_ BitVec 1)) +(define-fun x () (_ FloatingPoint 11 53) ((_ to_fp_unsigned 11 53) RNE bv)) +(assert (fp.isNaN x)) +(check-sat) diff --git a/test/regress/regress0/fp/issue5511.smt2 b/test/regress/regress0/fp/issue5511.smt2 new file mode 100644 index 000000000..911db54ee --- /dev/null +++ b/test/regress/regress0/fp/issue5511.smt2 @@ -0,0 +1,5 @@ +; EXPECT: sat +(set-logic QF_FP) +(declare-fun a () (_ FloatingPoint 53 11)) +(assert (= a (_ +oo 53 11))) +(check-sat) diff --git a/test/regress/regress0/fp/issue6164.smt2 b/test/regress/regress0/fp/issue6164.smt2 new file mode 100644 index 000000000..3ec9f1594 --- /dev/null +++ b/test/regress/regress0/fp/issue6164.smt2 @@ -0,0 +1,6 @@ +; EXPECT: sat +; EXPECT: ((((_ to_fp 5 11) roundNearestTiesToAway (/ 1 10)) (fp #b0 #b01011 #b1001100110))) +(set-logic ALL) +(set-option :produce-models true) +(check-sat) +(get-value (((_ to_fp 5 11) RNA 0.1))) -- 2.30.2
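
The recurring change across core/convert.h, core/packing.h, core/rounder.h and core/unpackedFloat.h is a guard around matchWidth: when the exponent-derived shift amount is at least as wide as the significand, it is cut down with extract(sigWidth - 1, 0) instead of being widened with matchWidth, which only goes from narrower to wider. Below is a minimal standalone sketch of that pattern; the toy BV class and the matchOrExtract helper are illustrative stand-ins, not SymFPU's actual ubv API.

#include <cassert>
#include <cstdint>
#include <iostream>

// Toy fixed-width unsigned bit-vector; stands in for SymFPU's ubv (assumption).
struct BV {
  unsigned width;   // number of bits
  uint64_t bits;    // value, kept within 'width' bits
  BV(unsigned w, uint64_t v)
    : width(w), bits(v & ((w >= 64) ? ~0ULL : ((1ULL << w) - 1))) {}

  // Zero-extend to a wider width; like ubv::matchWidth, only legal when growing.
  BV extendTo(unsigned w) const { assert(w >= width); return BV(w, bits); }
  // Keep the low 'w' bits; like ubv::extract(w - 1, 0).
  BV truncateTo(unsigned w) const { assert(w <= width); return BV(w, bits); }
};

// The pattern the 2020-11-14 patch introduces: pick extension or truncation
// depending on which operand is wider, instead of always calling matchWidth.
BV matchOrExtract(const BV &amount, unsigned targetWidth) {
  return (targetWidth >= amount.width) ? amount.extendTo(targetWidth)
                                       : amount.truncateTo(targetWidth);
}

int main() {
  // For (_ FloatingPoint 53 11) -- the format from issue 5511 -- the unpacked
  // exponent (54 bits here) is far wider than the 11-bit significand, so the
  // shift amount must be truncated rather than extended.
  BV shiftAmount(54, 3);                 // small value, wide representation
  BV fitted = matchOrExtract(shiftAmount, 11);
  std::cout << fitted.width << " " << fitted.bits << "\n";  // 11 3
  return 0;
}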
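
core/unpackedFloat.h also replaces the widening loop in exponentWidth, which the old comment flagged for overflow concerns, with a closed-form computation. The sketch below reimplements both versions with plain 64-bit integers and checks that they agree on binary16, binary32, binary64 and the 53/11 format from issue 5511; bitsToRepresent and the surrounding scaffolding are local stand-ins written only for this comparison, not SymFPU's code.

#include <cassert>
#include <cstdint>
#include <iostream>

typedef uint64_t bwt;

// Number of bits needed to represent x (stand-in for SymFPU's bitsToRepresent).
static bwt bitsToRepresent(bwt x) {
  bwt b = 0;
  while (x != 0) { ++b; x >>= 1; }
  return b;
}

// Pre-patch computation: widen until the smallest subnormal can be normalised.
static bwt exponentWidthOld(bwt exWidth, bwt sigWidth) {
  bwt width = exWidth;
  // The original used narrower integer shifts here, which is the overflow
  // concern its comment mentions; this sketch simply uses 64-bit arithmetic.
  bwt minimumExponent = ((bwt(1) << (width - 1)) - 2) + (sigWidth - 1);
  while ((bwt(1) << (width - 1)) < minimumExponent) ++width;
  return width;
}

// Post-patch computation (closed form, mirrors the patched unpackedFloat.h).
static bwt exponentWidthNew(bwt exWidth, bwt sigWidth) {
  if (sigWidth <= 3) return exWidth;  // subnormals already fit
  bwt bitsNeededForSubnormals = bitsToRepresent(sigWidth - 3);
  if (bitsNeededForSubnormals < exWidth - 1) return exWidth + 1;
  return bitsToRepresent((bwt(1) << (exWidth - 1)) + sigWidth - 3) + 1;
}

int main() {
  // (exponent bits, significand bits): binary16, binary32, binary64 and the
  // reversed 53/11 format from issue 5511.
  const bwt formats[][2] = {{5, 11}, {8, 24}, {11, 53}, {53, 11}};
  for (const auto &f : formats) {
    bwt o = exponentWidthOld(f[0], f[1]);
    bwt n = exponentWidthNew(f[0], f[1]);
    std::cout << f[0] << "/" << f[1] << ": old=" << o << " new=" << n << "\n";
    assert(o == n);
  }
  return 0;
}

The closed form avoids iterating (and overflowing) on wide-exponent formats while returning the same unpacked exponent width for the common cases above.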
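
Finally, the change to convertUBVToFloat in core/convert.h widens a 1-bit unsigned input to 2 bits before deriving the intermediate format, which is what from_ubv.smt2 and issue3582.smt2 exercise with (_ to_fp_unsigned ...) applied to (_ BitVec 1). A rough sketch of just that guard; the UBV struct and normaliseInputWidth are assumptions standing in for SymFPU's ubv and the patched code path.

#include <cstdint>
#include <iostream>

// Illustrative stand-in for SymFPU's ubv (assumption, not the real class).
struct UBV {
  unsigned width;
  uint64_t bits;
  UBV(unsigned w, uint64_t v) : width(w), bits(v) {}
  // Zero-extend by 'extra' bits, like ubv::extend.
  UBV extend(unsigned extra) const { return UBV(width + extra, bits); }
};

// convertUBVToFloat derives an intermediate format from the input width; a
// 1-bit input would give a degenerate format, so the patch widens it first.
UBV normaliseInputWidth(const UBV &preInput) {
  return (preInput.width == 1) ? preInput.extend(1) : preInput;
}

int main() {
  UBV oneBit(1, 0);                     // the (_ bv0 1) from from_ubv.smt2
  UBV fixed = normaliseInputWidth(oneBit);
  std::cout << fixed.width << "\n";     // 2: now a sensible intermediate width
  return 0;
}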