From abc4a8285776dcded21d0b7f3035c9858d061611 Mon Sep 17 00:00:00 2001 From: Rhys Perry Date: Mon, 3 Feb 2020 21:19:52 +0000 Subject: [PATCH] nir: make fsat return 0.0 with NaN instead of passing it through This is how lower_fsat and ACO implement fsat and is a more useful definition since it can be exactly created from fmin(fmax(a, 0.0), 1.0). Signed-off-by: Rhys Perry Reviewed-by: Ian Romanick Part-of: --- src/compiler/nir/nir_opcodes.py | 4 +--- src/compiler/nir/nir_opt_algebraic.py | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/src/compiler/nir/nir_opcodes.py b/src/compiler/nir/nir_opcodes.py index 830fb346a4d..e1c9788b4f0 100644 --- a/src/compiler/nir/nir_opcodes.py +++ b/src/compiler/nir/nir_opcodes.py @@ -199,9 +199,7 @@ unop("fsign", tfloat, ("bit_size == 64 ? " + unop("isign", tint, "(src0 == 0) ? 0 : ((src0 > 0) ? 1 : -1)") unop("iabs", tint, "(src0 < 0) ? -src0 : src0") unop("fabs", tfloat, "fabs(src0)") -unop("fsat", tfloat, ("bit_size == 64 ? " + - "((src0 > 1.0) ? 1.0 : ((src0 <= 0.0) ? 0.0 : src0)) : " + - "((src0 > 1.0f) ? 1.0f : ((src0 <= 0.0f) ? 0.0f : src0))")) +unop("fsat", tfloat, ("fmin(fmax(src0, 0.0), 1.0)")) unop("frcp", tfloat, "bit_size == 64 ? 1.0 / src0 : 1.0f / src0") unop("frsq", tfloat, "bit_size == 64 ? 1.0 / sqrt(src0) : 1.0f / sqrtf(src0)") unop("fsqrt", tfloat, "bit_size == 64 ? 
sqrt(src0) : sqrtf(src0)") diff --git a/src/compiler/nir/nir_opt_algebraic.py b/src/compiler/nir/nir_opt_algebraic.py index 58ff35af0b0..4112ccb0aa4 100644 --- a/src/compiler/nir/nir_opt_algebraic.py +++ b/src/compiler/nir/nir_opt_algebraic.py @@ -543,9 +543,17 @@ optimizations.extend([ (('fmax', a, ('fneg', a)), ('fabs', a)), (('imax', a, ('ineg', a)), ('iabs', a)), (('~fmax', ('fabs', a), 0.0), ('fabs', a)), - (('~fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'), + (('fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'), + # fmax(fmin(a, 1.0), 0.0) is inexact because it returns 1.0 on NaN, while + # fsat(a) returns 0.0. (('~fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'), + # fmin(fmax(a, -1.0), 0.0) is inexact because it returns -1.0 on NaN, while + # fneg(fsat(fneg(a))) returns -0.0 on NaN. (('~fmin', ('fmax', a, -1.0), 0.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'), + # fmax(fmin(a, 0.0), -1.0) is inexact because it returns 0.0 on NaN, while + # fneg(fsat(fneg(a))) returns -0.0 on NaN. This only matters if + # SignedZeroInfNanPreserve is set, but we don't currently have any way of + # representing this in the optimizations other than the usual ~. (('~fmax', ('fmin', a, 0.0), -1.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'), (('fsat', ('fsign', a)), ('b2f', ('flt', 0.0, a))), (('fsat', ('b2f', a)), ('b2f', a)), @@ -557,8 +565,11 @@ optimizations.extend([ (('fmin', ('fmax', ('fmin', ('fmax', a, b), c), b), c), ('fmin', ('fmax', a, b), c)), (('imin', ('imax', ('imin', ('imax', a, b), c), b), c), ('imin', ('imax', a, b), c)), (('umin', ('umax', ('umin', ('umax', a, b), c), b), c), ('umin', ('umax', a, b), c)), + # Both the left and right patterns are "b" when isnan(a), so this is exact. 
(('fmax', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmax', a, b))), - (('fmin', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmin', a, b))), + # The left pattern is 0.0 when isnan(a) (because fmin(fsat(NaN), b) -> + # fmin(0.0, b)) while the right one is "b", so this optimization is inexact. + (('~fmin', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmin', a, b))), # If a in [0,b] then b-a is also in [0,b]. Since b in [0,1], max(b-a, 0) = # fsat(b-a). -- 2.30.2