From d41cdef2a591930ad848b27f936aafcafc93b308 Mon Sep 17 00:00:00 2001
From: Ian Romanick
Date: Sat, 18 Aug 2018 16:42:04 -0700
Subject: [PATCH] nir: Use the flrp lowering pass instead of nir_opt_algebraic
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

I tried to be very careful while updating all the various drivers, but
I don't have any of that hardware for testing. :(

i965 is the only platform that sets always_precise = true, and it is
only set true for fragment shaders.

Gen4 and Gen5 both set lower_flrp32 only for vertex shaders.  For
fragment shaders, nir_op_flrp is lowered during code generation as
a(1-c)+bc.  On all other platforms, 64-bit nir_op_flrp, and on Gen11
32-bit nir_op_flrp, are lowered using the old nir_opt_algebraic method.

No changes on any other Intel platforms.

v2: Add panfrost changes.

Iron Lake and GM45 had similar results. (Iron Lake shown)
total cycles in shared programs: 188647754 -> 188647748 (<.01%)
cycles in affected programs: 5096 -> 5090 (-0.12%)
helped: 3
HURT: 0
helped stats (abs) min: 2 max: 2 x̄: 2.00 x̃: 2
helped stats (rel) min: 0.12% max: 0.12% x̄: 0.12% x̃: 0.12%

Reviewed-by: Matt Turner
---
 src/amd/vulkan/radv_shader.c                 | 25 +++++++++++++++++++
 src/broadcom/compiler/nir_to_vir.c           | 23 +++++++++++++++++
 src/compiler/nir/nir_opt_algebraic.py        |  3 ---
 src/freedreno/ir3/ir3_nir.c                  | 21 ++++++++++++++++
 .../panfrost/midgard/midgard_compile.c       | 25 +++++++++++++++++++
 src/gallium/drivers/radeonsi/si_shader_nir.c | 24 ++++++++++++++++++
 src/gallium/drivers/vc4/vc4_program.c        | 22 ++++++++++++++++
 src/intel/compiler/brw_nir.c                 | 23 +++++++++++++++++
 src/mesa/state_tracker/st_glsl_to_nir.cpp    | 24 ++++++++++++++++++
 9 files changed, 187 insertions(+), 3 deletions(-)

diff --git a/src/amd/vulkan/radv_shader.c b/src/amd/vulkan/radv_shader.c
index 648fb6586f7..7568d59056c 100644
--- a/src/amd/vulkan/radv_shader.c
+++ b/src/amd/vulkan/radv_shader.c
@@ -124,6 +124,10 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
                   bool allow_copies)
 {
        bool progress;
+       unsigned lower_flrp =
+               (shader->options->lower_flrp16 ? 16 : 0) |
+               (shader->options->lower_flrp32 ? 32 : 0) |
+               (shader->options->lower_flrp64 ? 64 : 0);

        do {
                progress = false;
@@ -164,6 +168,27 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
                NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
                NIR_PASS(progress, shader, nir_opt_algebraic);
                NIR_PASS(progress, shader, nir_opt_constant_folding);
+
+               if (lower_flrp != 0) {
+                       bool lower_flrp_progress;
+                       NIR_PASS(lower_flrp_progress,
+                                shader,
+                                nir_lower_flrp,
+                                lower_flrp,
+                                false /* always_precise */,
+                                shader->options->lower_ffma);
+                       if (lower_flrp_progress) {
+                               NIR_PASS(progress, shader,
+                                        nir_opt_constant_folding);
+                               progress = true;
+                       }
+
+                       /* Nothing should rematerialize any flrps, so we only
+                        * need to do this lowering once.
+                        */
+                       lower_flrp = 0;
+               }
+
                NIR_PASS(progress, shader, nir_opt_undef);
                NIR_PASS(progress, shader, nir_opt_conditional_discard);
                if (shader->options->max_unroll_iterations) {
diff --git a/src/broadcom/compiler/nir_to_vir.c b/src/broadcom/compiler/nir_to_vir.c
index 1556dbee45e..3e82f61d6ea 100644
--- a/src/broadcom/compiler/nir_to_vir.c
+++ b/src/broadcom/compiler/nir_to_vir.c
@@ -1301,6 +1301,10 @@ void
 v3d_optimize_nir(struct nir_shader *s)
 {
         bool progress;
+        unsigned lower_flrp =
+                (s->options->lower_flrp16 ? 16 : 0) |
+                (s->options->lower_flrp32 ? 32 : 0) |
+                (s->options->lower_flrp64 ? 64 : 0);

         do {
                 progress = false;
@@ -1316,6 +1320,25 @@ v3d_optimize_nir(struct nir_shader *s)
                 NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                 NIR_PASS(progress, s, nir_opt_algebraic);
                 NIR_PASS(progress, s, nir_opt_constant_folding);
+
+                if (lower_flrp != 0) {
+                        bool lower_flrp_progress;
+
+                        NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
+                                 lower_flrp,
+                                 false /* always_precise */,
+                                 s->options->lower_ffma);
+                        if (lower_flrp_progress) {
+                                NIR_PASS(progress, s, nir_opt_constant_folding);
+                                progress = true;
+                        }
+
+                        /* Nothing should rematerialize any flrps, so we only
+                         * need to do this lowering once.
+                         */
+                        lower_flrp = 0;
+                }
+
                 NIR_PASS(progress, s, nir_opt_undef);

         } while (progress);
diff --git a/src/compiler/nir/nir_opt_algebraic.py b/src/compiler/nir/nir_opt_algebraic.py
index 53beb4a8546..6379a399431 100644
--- a/src/compiler/nir/nir_opt_algebraic.py
+++ b/src/compiler/nir/nir_opt_algebraic.py
@@ -143,9 +143,6 @@ optimizations = [
    (('~flrp', a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp32'),
    (('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
-   (('flrp@16', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp16'),
-   (('flrp@32', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp32'),
-   (('flrp@64', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp64'),
    (('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
    (('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
    (('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
diff --git a/src/freedreno/ir3/ir3_nir.c b/src/freedreno/ir3/ir3_nir.c
index 3bc22fe5be5..744fd958fc6 100644
--- a/src/freedreno/ir3/ir3_nir.c
+++ b/src/freedreno/ir3/ir3_nir.c
@@ -112,6 +112,11 @@ static void
 ir3_optimize_loop(nir_shader *s)
 {
        bool progress;
+       unsigned lower_flrp =
+               (s->options->lower_flrp16 ? 16 : 0) |
+               (s->options->lower_flrp32 ? 32 : 0) |
+               (s->options->lower_flrp64 ? 64 : 0);
+
        do {
                progress = false;
@@ -135,6 +140,22 @@ ir3_optimize_loop(nir_shader *s)
                progress |= OPT(s, nir_opt_intrinsics);
                progress |= OPT(s, nir_opt_algebraic);
                progress |= OPT(s, nir_opt_constant_folding);
+
+               if (lower_flrp != 0) {
+                       if (OPT(s, nir_lower_flrp,
+                               lower_flrp,
+                               false /* always_precise */,
+                               s->options->lower_ffma)) {
+                               OPT(s, nir_opt_constant_folding);
+                               progress = true;
+                       }
+
+                       /* Nothing should rematerialize any flrps, so we only
+                        * need to do this lowering once.
+                        */
+                       lower_flrp = 0;
+               }
+
                progress |= OPT(s, nir_opt_dead_cf);
                if (OPT(s, nir_opt_trivial_continues)) {
                        progress |= true;
diff --git a/src/gallium/drivers/panfrost/midgard/midgard_compile.c b/src/gallium/drivers/panfrost/midgard/midgard_compile.c
index 29f3ce7ff71..9c7928decf6 100644
--- a/src/gallium/drivers/panfrost/midgard/midgard_compile.c
+++ b/src/gallium/drivers/panfrost/midgard/midgard_compile.c
@@ -885,6 +885,10 @@ static void
 optimise_nir(nir_shader *nir)
 {
         bool progress;
+        unsigned lower_flrp =
+                (nir->options->lower_flrp16 ? 16 : 0) |
+                (nir->options->lower_flrp32 ? 32 : 0) |
+                (nir->options->lower_flrp64 ? 64 : 0);

         NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
         NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
@@ -909,6 +913,27 @@ optimise_nir(nir_shader *nir)
                 NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                 NIR_PASS(progress, nir, nir_opt_algebraic);
                 NIR_PASS(progress, nir, nir_opt_constant_folding);
+
+                if (lower_flrp != 0) {
+                        bool lower_flrp_progress;
+                        NIR_PASS(lower_flrp_progress,
+                                 nir,
+                                 nir_lower_flrp,
+                                 lower_flrp,
+                                 false /* always_precise */,
+                                 nir->options->lower_ffma);
+                        if (lower_flrp_progress) {
+                                NIR_PASS(progress, nir,
+                                         nir_opt_constant_folding);
+                                progress = true;
+                        }
+
+                        /* Nothing should rematerialize any flrps, so we only
+                         * need to do this lowering once.
+                         */
+                        lower_flrp = 0;
+                }
+
                 NIR_PASS(progress, nir, nir_opt_undef);
                 NIR_PASS(progress, nir, nir_opt_loop_unroll,
                          nir_var_shader_in |
diff --git a/src/gallium/drivers/radeonsi/si_shader_nir.c b/src/gallium/drivers/radeonsi/si_shader_nir.c
index 87100fbed19..afc8d5f3b78 100644
--- a/src/gallium/drivers/radeonsi/si_shader_nir.c
+++ b/src/gallium/drivers/radeonsi/si_shader_nir.c
@@ -815,6 +815,11 @@ void si_nir_opts(struct nir_shader *nir)
 {
        bool progress;
+       unsigned lower_flrp =
+               (nir->options->lower_flrp16 ? 16 : 0) |
+               (nir->options->lower_flrp32 ? 32 : 0) |
+               (nir->options->lower_flrp64 ? 64 : 0);
+
        do {
                progress = false;
@@ -844,6 +849,25 @@ void si_nir_opts(struct nir_shader *nir)
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

+               if (lower_flrp != 0) {
+                       bool lower_flrp_progress;
+
+                       NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
+                                lower_flrp,
+                                false /* always_precise */,
+                                nir->options->lower_ffma);
+                       if (lower_flrp_progress) {
+                               NIR_PASS(progress, nir,
+                                        nir_opt_constant_folding);
+                               progress = true;
+                       }
+
+                       /* Nothing should rematerialize any flrps, so we only
+                        * need to do this lowering once.
+                        */
+                       lower_flrp = 0;
+               }
+
                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_conditional_discard);
                if (nir->options->max_unroll_iterations) {
diff --git a/src/gallium/drivers/vc4/vc4_program.c b/src/gallium/drivers/vc4/vc4_program.c
index 2ca3f907135..a2af55d6421 100644
--- a/src/gallium/drivers/vc4/vc4_program.c
+++ b/src/gallium/drivers/vc4/vc4_program.c
@@ -1527,6 +1527,10 @@ static void
 vc4_optimize_nir(struct nir_shader *s)
 {
         bool progress;
+        unsigned lower_flrp =
+                (s->options->lower_flrp16 ? 16 : 0) |
+                (s->options->lower_flrp32 ? 32 : 0) |
+                (s->options->lower_flrp64 ? 64 : 0);

         do {
                 progress = false;
@@ -1542,6 +1546,24 @@ vc4_optimize_nir(struct nir_shader *s)
                 NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                 NIR_PASS(progress, s, nir_opt_algebraic);
                 NIR_PASS(progress, s, nir_opt_constant_folding);
+                if (lower_flrp != 0) {
+                        bool lower_flrp_progress;
+
+                        NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
+                                 lower_flrp,
+                                 false /* always_precise */,
+                                 s->options->lower_ffma);
+                        if (lower_flrp_progress) {
+                                NIR_PASS(progress, s, nir_opt_constant_folding);
+                                progress = true;
+                        }
+
+                        /* Nothing should rematerialize any flrps, so we only
+                         * need to do this lowering once.
+                         */
+                        lower_flrp = 0;
+                }
+
                 NIR_PASS(progress, s, nir_opt_undef);
                 NIR_PASS(progress, s, nir_opt_loop_unroll,
                          nir_var_shader_in |
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index 4a1fbf08c97..7e6f9d2d436 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -538,6 +538,11 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
       brw_nir_no_indirect_mask(compiler, nir->info.stage);

    bool progress;
+   unsigned lower_flrp =
+      (nir->options->lower_flrp16 ? 16 : 0) |
+      (nir->options->lower_flrp32 ? 32 : 0) |
+      (nir->options->lower_flrp64 ? 64 : 0);
+
    do {
       progress = false;
       OPT(nir_split_array_vars, nir_var_function_temp);
@@ -598,6 +603,24 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
       OPT(nir_opt_idiv_const, 32);
       OPT(nir_opt_algebraic);
       OPT(nir_opt_constant_folding);
+
+      if (lower_flrp != 0) {
+         /* To match the old behavior, set always_precise only for scalar
+          * shader stages.
+          */
+         if (OPT(nir_lower_flrp,
+                 lower_flrp,
+                 is_scalar /* always_precise */,
+                 compiler->devinfo->gen >= 6)) {
+            OPT(nir_opt_constant_folding);
+         }
+
+         /* Nothing should rematerialize any flrps, so we only need to do this
+          * lowering once.
+          */
+         lower_flrp = 0;
+      }
+
       OPT(nir_opt_dead_cf);
       if (OPT(nir_opt_trivial_continues)) {
          /* If nir_opt_trivial_continues makes progress, then we need to clean
diff --git a/src/mesa/state_tracker/st_glsl_to_nir.cpp b/src/mesa/state_tracker/st_glsl_to_nir.cpp
index 97b2831b880..0a67d4532eb 100644
--- a/src/mesa/state_tracker/st_glsl_to_nir.cpp
+++ b/src/mesa/state_tracker/st_glsl_to_nir.cpp
@@ -304,6 +304,11 @@ void
 st_nir_opts(nir_shader *nir, bool scalar)
 {
    bool progress;
+   unsigned lower_flrp =
+      (nir->options->lower_flrp16 ? 16 : 0) |
+      (nir->options->lower_flrp32 ? 32 : 0) |
+      (nir->options->lower_flrp64 ? 64 : 0);
+
    do {
       progress = false;
@@ -332,6 +337,25 @@ st_nir_opts(nir_shader *nir, bool scalar)
       NIR_PASS(progress, nir, nir_opt_algebraic);
       NIR_PASS(progress, nir, nir_opt_constant_folding);

+      if (lower_flrp != 0) {
+         bool lower_flrp_progress;
+
+         NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
+                  lower_flrp,
+                  false /* always_precise */,
+                  nir->options->lower_ffma);
+         if (lower_flrp_progress) {
+            NIR_PASS(progress, nir,
+                     nir_opt_constant_folding);
+            progress = true;
+         }
+
+         /* Nothing should rematerialize any flrps, so we only need to do this
+          * lowering once.
+          */
+         lower_flrp = 0;
+      }
+
       NIR_PASS(progress, nir, nir_opt_undef);
       NIR_PASS(progress, nir, nir_opt_conditional_discard);
       if (nir->options->max_unroll_iterations) {
-- 
2.30.2
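
For reference (not part of the patch): the nir_opt_algebraic rules removed
above expand flrp(a, b, c) as a + c*(b - a), while the commit message refers
to the more precise a*(1 - c) + b*c form used where always_precise is set.
The standalone C sketch below only illustrates why the two algebraic forms
behave differently in floating point; the helper names are made up, and the
real nir_lower_flrp pass rewrites NIR instructions with its own heuristics.

/* flrp_forms.c: scalar illustration of the two lowering forms above.
 * These helpers are illustrative only; the real pass (nir_lower_flrp)
 * rewrites NIR ALU instructions instead of operating on C floats.
 */
#include <stdio.h>

/* Cheap form matching the removed nir_opt_algebraic rules: a + c*(b - a). */
static float lerp_fast(float a, float b, float c)
{
   return a + c * (b - a);
}

/* Precise form mentioned in the commit message: a*(1 - c) + b*c.
 * Unlike the cheap form, it returns exactly b when c == 1.0.
 */
static float lerp_precise(float a, float b, float c)
{
   return a * (1.0f - c) + b * c;
}

int main(void)
{
   const float a = 1.0e8f, b = 3.0f, c = 1.0f;

   /* With |a| this large, (b - a) rounds to -a in single precision, so the
    * cheap form drops b entirely at c == 1.
    */
   printf("fast:    %f\n", lerp_fast(a, b, c));    /* prints 0.000000 */
   printf("precise: %f\n", lerp_precise(a, b, c)); /* prints 3.000000 */
   return 0;
}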