X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fcompiler%2Fnir%2Fnir_opt_algebraic.py;h=ec8929a6d9f00740d1e3ccb2ae31579393c77a08;hb=18c8b927e205d7c8f2a04377b1fa3c4242074de1;hp=50d37ea37f10a16245e469215ca7fbcb74f55066;hpb=68f8c5730bef7b93fd235ae2f3c87cc9403b5b4c;p=mesa.git diff --git a/src/compiler/nir/nir_opt_algebraic.py b/src/compiler/nir/nir_opt_algebraic.py index 50d37ea37f1..ec8929a6d9f 100644 --- a/src/compiler/nir/nir_opt_algebraic.py +++ b/src/compiler/nir/nir_opt_algebraic.py @@ -34,10 +34,17 @@ d = 'd' # Written in the form (, ) where is an expression # and is either an expression or a value. An expression is -# defined as a tuple of the form (, , , , ) +# defined as a tuple of the form ([~], , , , ) # where each source is either an expression or a value. A value can be # either a numeric constant or a string representing a variable name. # +# If the opcode in a search expression is prefixed by a '~' character, this +# indicates that the operation is inexact. Such operations will only get +# applied to SSA values that do not have the exact bit set. This should be +# used by by any optimizations that are not bit-for-bit exact. It should not, +# however, be used for backend-requested lowering operations as those need to +# happen regardless of precision. +# # Variable names are specified as "[#]name[@type]" where "#" inicates that # the given variable will only match constants and the type indicates that # the given variable will only match values from ALU instructions with the @@ -54,19 +61,19 @@ optimizations = [ (('fabs', ('fneg', a)), ('fabs', a)), (('iabs', ('iabs', a)), ('iabs', a)), (('iabs', ('ineg', a)), ('iabs', a)), - (('fadd', a, 0.0), a), + (('~fadd', a, 0.0), a), (('iadd', a, 0), a), (('usadd_4x8', a, 0), a), (('usadd_4x8', a, ~0), ~0), - (('fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))), + (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))), (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))), - (('fadd', ('fneg', a), a), 0.0), + (('~fadd', ('fneg', a), a), 0.0), (('iadd', ('ineg', a), a), 0), (('iadd', ('ineg', a), ('iadd', a, b)), b), (('iadd', a, ('iadd', ('ineg', a), b)), b), - (('fadd', ('fneg', a), ('fadd', a, b)), b), - (('fadd', a, ('fadd', ('fneg', a), b)), b), - (('fmul', a, 0.0), 0.0), + (('~fadd', ('fneg', a), ('fadd', a, b)), b), + (('~fadd', a, ('fadd', ('fneg', a), b)), b), + (('~fmul', a, 0.0), 0.0), (('imul', a, 0), 0), (('umul_unorm_4x8', a, 0), 0), (('umul_unorm_4x8', a, ~0), a), @@ -74,50 +81,74 @@ optimizations = [ (('imul', a, 1), a), (('fmul', a, -1.0), ('fneg', a)), (('imul', a, -1), ('ineg', a)), - (('ffma', 0.0, a, b), b), - (('ffma', a, 0.0, b), b), - (('ffma', a, b, 0.0), ('fmul', a, b)), + (('~ffma', 0.0, a, b), b), + (('~ffma', a, 0.0, b), b), + (('~ffma', a, b, 0.0), ('fmul', a, b)), (('ffma', a, 1.0, b), ('fadd', a, b)), (('ffma', 1.0, a, b), ('fadd', a, b)), - (('flrp', a, b, 0.0), a), - (('flrp', a, b, 1.0), b), - (('flrp', a, a, b), a), - (('flrp', 0.0, a, b), ('fmul', a, b)), + (('~flrp', a, b, 0.0), a), + (('~flrp', a, b, 1.0), b), + (('~flrp', a, a, b), a), + (('~flrp', 0.0, a, b), ('fmul', a, b)), + (('~flrp', a, b, ('b2f', c)), ('bcsel', c, b, a), 'options->lower_flrp'), (('flrp', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp'), (('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'), - (('fadd', ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp'), - (('fadd', a, 
('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp'), + (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', c)))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp'), + (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', c ))), ('fmul', b, c )), ('flrp', a, b, c), '!options->lower_flrp'), + (('~fadd', a, ('fmul', ('b2f', c), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp'), + (('~fadd', a, ('fmul', c , ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp'), (('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'), - (('fadd', ('fmul', a, b), c), ('ffma', a, b, c), '!options->lower_ffma'), + (('~fadd', ('fmul', a, b), c), ('ffma', a, b, c), '!options->lower_ffma'), # Comparison simplifications - (('inot', ('flt', a, b)), ('fge', a, b)), - (('inot', ('fge', a, b)), ('flt', a, b)), - (('inot', ('feq', a, b)), ('fne', a, b)), - (('inot', ('fne', a, b)), ('feq', a, b)), + (('~inot', ('flt', a, b)), ('fge', a, b)), + (('~inot', ('fge', a, b)), ('flt', a, b)), + (('~inot', ('feq', a, b)), ('fne', a, b)), + (('~inot', ('fne', a, b)), ('feq', a, b)), (('inot', ('ilt', a, b)), ('ige', a, b)), (('inot', ('ige', a, b)), ('ilt', a, b)), (('inot', ('ieq', a, b)), ('ine', a, b)), (('inot', ('ine', a, b)), ('ieq', a, b)), + + # 0.0 >= b2f(a) + # b2f(a) <= 0.0 + # b2f(a) == 0.0 because b2f(a) can only be 0 or 1 + # inot(a) + (('fge', 0.0, ('b2f', a)), ('inot', a)), + + # 0.0 < fabs(a) + # fabs(a) > 0.0 + # fabs(a) != 0.0 because fabs(a) must be >= 0 + # a != 0.0 + (('flt', 0.0, ('fabs', a)), ('fne', a, 0.0)), + (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)), - (('bcsel', ('flt', a, b), a, b), ('fmin', a, b)), + (('bcsel', ('flt', b, a), b, a), ('fmin', a, b)), (('bcsel', ('flt', a, b), b, a), ('fmax', a, b)), (('bcsel', ('inot', 'a@bool'), b, c), ('bcsel', a, c, b)), (('bcsel', a, ('bcsel', a, b, c), d), ('bcsel', a, b, d)), + (('bcsel', a, True, 'b@bool'), ('ior', a, b)), (('fmin', a, a), a), (('fmax', a, a), a), (('imin', a, a), a), (('imax', a, a), a), (('umin', a, a), a), (('umax', a, a), a), - (('fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'), - (('fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'), + (('~fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'), + (('~fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'), (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'), (('fsat', ('fsat', a)), ('fsat', a)), - (('fmin', ('fmax', ('fmin', ('fmax', a, 0.0), 1.0), 0.0), 1.0), ('fmin', ('fmax', a, 0.0), 1.0)), - (('ior', ('flt', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))), - (('ior', ('flt', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)), - (('ior', ('fge', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))), - (('ior', ('fge', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)), + (('fmin', ('fmax', ('fmin', ('fmax', a, b), c), b), c), ('fmin', ('fmax', a, b), c)), + (('imin', ('imax', ('imin', ('imax', a, b), c), b), c), ('imin', ('imax', a, b), c)), + (('umin', ('umax', ('umin', ('umax', a, b), c), b), c), ('umin', ('umax', a, b), c)), + (('extract_u8', ('imin', ('imax', a, 0), 0xff), 0), ('imin', ('imax', a, 0), 0xff)), + (('~ior', ('flt', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))), + (('~ior', ('flt', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)), + (('~ior', ('fge', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))), + (('~ior', ('fge', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)), + 
(('fabs', ('slt', a, b)), ('slt', a, b)), + (('fabs', ('sge', a, b)), ('sge', a, b)), + (('fabs', ('seq', a, b)), ('seq', a, b)), + (('fabs', ('sne', a, b)), ('sne', a, b)), (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'), (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'), (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'), @@ -149,6 +180,7 @@ optimizations = [ (('ior', a, 0), a), (('fxor', a, a), 0.0), (('ixor', a, a), 0), + (('ixor', a, 0), a), (('inot', ('inot', a)), a), # DeMorgan's Laws (('iand', ('inot', a), ('inot', b)), ('inot', ('ior', a, b))), @@ -160,34 +192,38 @@ optimizations = [ (('ishr', a, 0), a), (('ushr', 0, a), 0), (('ushr', a, 0), a), + (('iand', 0xff, ('ushr', a, 24)), ('ushr', a, 24)), + (('iand', 0xffff, ('ushr', a, 16)), ('ushr', a, 16)), # Exponential/logarithmic identities - (('fexp2', ('flog2', a)), a), # 2^lg2(a) = a - (('flog2', ('fexp2', a)), a), # lg2(2^a) = a + (('~fexp2', ('flog2', a)), a), # 2^lg2(a) = a + (('~flog2', ('fexp2', a)), a), # lg2(2^a) = a (('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'), # a^b = 2^(lg2(a)*b) - (('fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b - (('fpow', a, 1.0), a), - (('fpow', a, 2.0), ('fmul', a, a)), - (('fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))), - (('fpow', 2.0, a), ('fexp2', a)), - (('fpow', ('fpow', a, 2.2), 0.454545), a), - (('fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)), - (('fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))), - (('frcp', ('fexp2', a)), ('fexp2', ('fneg', a))), - (('frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))), - (('flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))), - (('flog2', ('frcp', a)), ('fneg', ('flog2', a))), - (('flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))), - (('flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))), - (('fadd', ('flog2', a), ('flog2', b)), ('flog2', ('fmul', a, b))), - (('fadd', ('flog2', a), ('fneg', ('flog2', b))), ('flog2', ('fdiv', a, b))), - (('fmul', ('fexp2', a), ('fexp2', b)), ('fexp2', ('fadd', a, b))), + (('~fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b + (('~fexp2', ('fadd', ('fmul', ('flog2', a), b), ('fmul', ('flog2', c), d))), + ('~fmul', ('fpow', a, b), ('fpow', c, d)), '!options->lower_fpow'), # 2^(lg2(a) * b + lg2(c) + d) = a^b * c^d + (('~fpow', a, 1.0), a), + (('~fpow', a, 2.0), ('fmul', a, a)), + (('~fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))), + (('~fpow', 2.0, a), ('fexp2', a)), + (('~fpow', ('fpow', a, 2.2), 0.454545), a), + (('~fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)), + (('~fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))), + (('~frcp', ('fexp2', a)), ('fexp2', ('fneg', a))), + (('~frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))), + (('~flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))), + (('~flog2', ('frcp', a)), ('fneg', ('flog2', a))), + (('~flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))), + (('~flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))), + (('~fadd', ('flog2', a), ('flog2', b)), ('flog2', ('fmul', a, b))), + (('~fadd', ('flog2', a), ('fneg', ('flog2', b))), ('flog2', ('fdiv', a, b))), + (('~fmul', ('fexp2', a), ('fexp2', b)), ('fexp2', ('fadd', a, b))), # Division and reciprocal - (('fdiv', 1.0, a), ('frcp', a)), + (('~fdiv', 1.0, a), ('frcp', a)), (('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'), - (('frcp', ('frcp', a)), a), - (('frcp', ('fsqrt', a)), ('frsq', 
a)), + (('~frcp', ('frcp', a)), a), + (('~frcp', ('fsqrt', a)), ('frsq', a)), (('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'), - (('frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'), + (('~frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'), # Boolean simplifications (('ieq', 'a@bool', True), a), (('ine', 'a@bool', True), ('inot', a)), @@ -208,9 +244,23 @@ optimizations = [ (('i2b', ('b2i', a)), a), (('f2i', ('ftrunc', a)), ('f2i', a)), (('f2u', ('ftrunc', a)), ('f2u', a)), + (('i2b', ('ineg', a)), ('i2b', a)), + (('i2b', ('iabs', a)), ('i2b', a)), + (('fabs', ('b2f', a)), ('b2f', a)), + (('iabs', ('b2i', a)), ('b2i', a)), + + # Byte extraction + (('ushr', a, 24), ('extract_u8', a, 3), '!options->lower_extract_byte'), + (('iand', 0xff, ('ushr', a, 16)), ('extract_u8', a, 2), '!options->lower_extract_byte'), + (('iand', 0xff, ('ushr', a, 8)), ('extract_u8', a, 1), '!options->lower_extract_byte'), + (('iand', 0xff, a), ('extract_u8', a, 0), '!options->lower_extract_byte'), + + # Word extraction + (('ushr', a, 16), ('extract_u16', a, 1), '!options->lower_extract_word'), + (('iand', 0xffff, a), ('extract_u16', a, 0), '!options->lower_extract_word'), # Subtracts - (('fsub', a, ('fsub', 0.0, b)), ('fadd', a, b)), + (('~fsub', a, ('fsub', 0.0, b)), ('fadd', a, b)), (('isub', a, ('isub', 0, b)), ('iadd', a, b)), (('ussub_4x8', a, 0), a), (('ussub_4x8', a, ~0), 0), @@ -218,11 +268,23 @@ optimizations = [ (('isub', a, b), ('iadd', a, ('ineg', b)), 'options->lower_sub'), (('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'), (('ineg', a), ('isub', 0, a), 'options->lower_negate'), - (('fadd', a, ('fsub', 0.0, b)), ('fsub', a, b)), + (('~fadd', a, ('fsub', 0.0, b)), ('fsub', a, b)), (('iadd', a, ('isub', 0, b)), ('isub', a, b)), (('fabs', ('fsub', 0.0, a)), ('fabs', a)), (('iabs', ('isub', 0, a)), ('iabs', a)), + # Propagate negation up multiplication chains + (('fmul', ('fneg', a), b), ('fneg', ('fmul', a, b))), + (('imul', ('ineg', a), b), ('ineg', ('imul', a, b))), + + # Reassociate constants in add/mul chains so they can be folded together. + # For now, we only handle cases where the constants are separated by + # a single non-constant. We could do better eventually. + (('~fmul', '#a', ('fmul', b, '#c')), ('fmul', ('fmul', a, c), b)), + (('imul', '#a', ('imul', b, '#c')), ('imul', ('imul', a, c), b)), + (('~fadd', '#a', ('fadd', b, '#c')), ('fadd', ('fadd', a, c), b)), + (('iadd', '#a', ('iadd', b, '#c')), ('iadd', ('iadd', a, c), b)), + # Misc. 
lowering (('fmod', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'), (('uadd_carry', a, b), ('b2i', ('ult', ('iadd', a, b), a)), 'options->lower_uadd_carry'), @@ -258,8 +320,69 @@ optimizations = [ (('extract_u16', a, b), ('iand', ('ushr', a, ('imul', b, 16)), 0xffff), 'options->lower_extract_word'), + + (('pack_unorm_2x16', 'v'), + ('pack_uvec2_to_uint', + ('f2u', ('fround_even', ('fmul', ('fsat', 'v'), 65535.0)))), + 'options->lower_pack_unorm_2x16'), + + (('pack_unorm_4x8', 'v'), + ('pack_uvec4_to_uint', + ('f2u', ('fround_even', ('fmul', ('fsat', 'v'), 255.0)))), + 'options->lower_pack_unorm_4x8'), + + (('pack_snorm_2x16', 'v'), + ('pack_uvec2_to_uint', + ('f2i', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 32767.0)))), + 'options->lower_pack_snorm_2x16'), + + (('pack_snorm_4x8', 'v'), + ('pack_uvec4_to_uint', + ('f2i', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 127.0)))), + 'options->lower_pack_snorm_4x8'), + + (('unpack_unorm_2x16', 'v'), + ('fdiv', ('u2f', ('vec2', ('extract_u16', 'v', 0), + ('extract_u16', 'v', 1))), + 65535.0), + 'options->lower_unpack_unorm_2x16'), + + (('unpack_unorm_4x8', 'v'), + ('fdiv', ('u2f', ('vec4', ('extract_u8', 'v', 0), + ('extract_u8', 'v', 1), + ('extract_u8', 'v', 2), + ('extract_u8', 'v', 3))), + 255.0), + 'options->lower_unpack_unorm_4x8'), + + (('unpack_snorm_2x16', 'v'), + ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec2', ('extract_i16', 'v', 0), + ('extract_i16', 'v', 1))), + 32767.0))), + 'options->lower_unpack_snorm_2x16'), + + (('unpack_snorm_4x8', 'v'), + ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec4', ('extract_i8', 'v', 0), + ('extract_i8', 'v', 1), + ('extract_i8', 'v', 2), + ('extract_i8', 'v', 3))), + 127.0))), + 'options->lower_unpack_snorm_4x8'), ] +# Unreal Engine 4 demo applications open-codes bitfieldReverse() +def bitfield_reverse(u): + step1 = ('ior', ('ishl', u, 16), ('ushr', u, 16)) + step2 = ('ior', ('ishl', ('iand', step1, 0x00ff00ff), 8), ('ushr', ('iand', step1, 0xff00ff00), 8)) + step3 = ('ior', ('ishl', ('iand', step2, 0x0f0f0f0f), 4), ('ushr', ('iand', step2, 0xf0f0f0f0), 4)) + step4 = ('ior', ('ishl', ('iand', step3, 0x33333333), 2), ('ushr', ('iand', step3, 0xcccccccc), 2)) + step5 = ('ior', ('ishl', ('iand', step4, 0x55555555), 1), ('ushr', ('iand', step4, 0xaaaaaaaa), 1)) + + return step5 + +optimizations += [(bitfield_reverse('x'), ('bitfield_reverse', 'x'))] + + # Add optimizations to handle the case where the result of a ternary is # compared to a constant. This way we can take things like # @@ -286,10 +409,13 @@ for op in ['flt', 'fge', 'feq', 'fne', # they help code generation but do not necessarily produce code that is # more easily optimizable. late_optimizations = [ + # Most of these optimizations aren't quite safe when you get infinity or + # Nan involved but the first one should be fine. (('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))), - (('fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))), - (('feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))), - (('fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))), + (('~fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))), + (('~feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))), + (('~fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))), + (('fdot2', a, b), ('fdot_replicated2', a, b), 'options->fdot_replicates'), (('fdot3', a, b), ('fdot_replicated3', a, b), 'options->fdot_replicates'), (('fdot4', a, b), ('fdot_replicated4', a, b), 'options->fdot_replicates'),
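
Illustration, not part of the diff: the header comment above defines the rule format used throughout this file. Each entry is a (search, replace) or (search, replace, condition) tuple; a '~' prefix on the search opcode marks the rule as inexact so it is skipped for SSA values with the exact bit set, a '#' before a variable name makes it match only constants, '@type' makes it match only ALU results of that type, and the optional condition string is checked against the NIR compiler options. A few rules already present in the hunks above, annotated in that vocabulary:

a, b, c = 'a', 'b', 'c'   # variable placeholders, as in the file's preamble

annotated_rules = [
    # Inexact ('~'): fadd(a, 0.0) -> a is not bit-exact when a is -0.0, so
    # the rule only fires on values that do not have the exact bit set.
    (('~fadd', a, 0.0), a),

    # '#a' and '#c' match only constants, so the reassociated ('fmul', a, c)
    # can be constant-folded.
    (('~fmul', '#a', ('fmul', b, '#c')), ('fmul', ('fmul', a, c), b)),

    # 'b@bool' matches only boolean-typed ALU results.
    (('bcsel', a, True, 'b@bool'), ('ior', a, b)),

    # The optional third element is a condition on the compiler options.
    (('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'),
]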
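
A minimal pure-Python sketch (not NIR code) of what the lower_pack_unorm_2x16 / lower_unpack_unorm_2x16 expansions added above compute, assuming pack_uvec2_to_uint places component 0 in the low 16 bits; Python 3's round() rounds half to even, which matches fround_even:

def pack_unorm_2x16(x, y):
    # fsat, scale by 65535, fround_even, f2u, then pack the two 16-bit values
    def to_u16(f):
        return int(round(min(max(f, 0.0), 1.0) * 65535.0))
    return to_u16(x) | (to_u16(y) << 16)

def unpack_unorm_2x16(u):
    # extract_u16 of each half, u2f, then divide by 65535.0
    return ((u & 0xffff) / 65535.0, ((u >> 16) & 0xffff) / 65535.0)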
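
As a quick sanity check (again not part of the diff), the bitfield_reverse() helper added near the end matches an open-coded 32-bit bit reversal: swap halves, then bytes, nibbles, bit pairs, and adjacent bits. The same ladder in self-contained Python, with the 32-bit masking that NIR's integer ops provide implicitly:

def bitfield_reverse32(u):
    u = ((u << 16) | (u >> 16)) & 0xffffffff                              # step1: swap halves
    u = (((u & 0x00ff00ff) << 8) | ((u & 0xff00ff00) >> 8)) & 0xffffffff  # step2: swap bytes
    u = (((u & 0x0f0f0f0f) << 4) | ((u & 0xf0f0f0f0) >> 4)) & 0xffffffff  # step3: swap nibbles
    u = (((u & 0x33333333) << 2) | ((u & 0xcccccccc) >> 2)) & 0xffffffff  # step4: swap bit pairs
    u = (((u & 0x55555555) << 1) | ((u & 0xaaaaaaaa) >> 1)) & 0xffffffff  # step5: swap adjacent bits
    return u

assert bitfield_reverse32(0x00000001) == 0x80000000
assert bitfield_reverse32(0x12345678) == 0x1e6a2c48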
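
Finally, a short illustration of the comment in the late_optimizations hunk: the fge/feq/fne forms are marked inexact ('~') while the flt form is not, because folding the add across the comparison can change the result once infinities are involved. In Python floating point:

inf = float('inf')
print((inf + -inf) >= 0.0)   # fge(fadd(a, b), 0.0) with a=+inf, b=-inf: NaN >= 0  -> False
print(inf >= -(-inf))        # fge(a, fneg(b)) with the same inputs:     inf >= inf -> True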