amd: Swap from nir_opt_shrink_load() to nir_opt_shrink_vectors().
author Eric Anholt <eric@anholt.net>
Thu, 23 Jul 2020 05:00:57 +0000 (22:00 -0700)
committer Marge Bot <eric+marge@anholt.net>
Mon, 3 Aug 2020 21:26:45 +0000 (21:26 +0000)
This should do much more trimming than shrink_load, and is a win on i965's
vec4 backend and on nir-to-tgsi.  For scalar backends like this one, which
don't need the ALU shrinking, it still gets more load intrinsics covered.

Reviewed-by: Rhys Perry <pendingchaos02@gmail.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6050>
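For context, here is a minimal sketch (not part of the commit) of how the
replacement pass is wired in at the two call sites changed below.  The wrapper
function names are hypothetical; the actual calls (nir_opt_shrink_vectors,
nir_lower_alu_to_scalar, NIR_PASS) are taken directly from the diff.

    #include "nir.h"

    /* Sketch only: hypothetical wrappers illustrating the two call sites
     * touched by this commit. */
    static void
    setup_path(nir_shader *nir)
    {
       bool lower_to_scalar = false;
       /* ... earlier lowering in setup_nir() may also set lower_to_scalar ... */

       /* aco setup path: nir_opt_shrink_vectors() returns true when it
        * trimmed something, which may leave ALU ops to re-scalarize. */
       lower_to_scalar |= nir_opt_shrink_vectors(nir);
       if (lower_to_scalar)
          nir_lower_alu_to_scalar(nir, NULL, NULL);
    }

    static void
    optimize_loop_path(nir_shader *shader)
    {
       bool progress = false;
       /* radv_optimize_nir() path: the pass joins the usual NIR_PASS
        * progress tracking in the optimization loop. */
       NIR_PASS(progress, shader, nir_opt_shrink_vectors);
    }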

src/amd/compiler/aco_instruction_selection_setup.cpp
src/amd/vulkan/radv_shader.c

index d5e5eb47574de9e92dd983306eb1f8057007f066..799797f3e3500cefb7fb144d90e3e4fa81f7c720 100644 (file)
@@ -1344,6 +1344,8 @@ setup_nir(isel_context *ctx, nir_shader *nir)
    if (nir->info.stage != MESA_SHADER_COMPUTE)
       nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);
 
+   lower_to_scalar |= nir_opt_shrink_vectors(nir);
+
    if (lower_to_scalar)
       nir_lower_alu_to_scalar(nir, NULL, NULL);
    if (lower_pack)
@@ -1385,7 +1387,6 @@ setup_nir(isel_context *ctx, nir_shader *nir)
 
    /* cleanup passes */
    nir_lower_load_const_to_scalar(nir);
-   nir_opt_shrink_load(nir);
    nir_move_options move_opts = (nir_move_options)(
       nir_move_const_undef | nir_move_load_ubo | nir_move_load_input |
       nir_move_comparisons | nir_move_copies);
index e499e6ea2343079c6d3f6b0011e8470c89217019..e279260805150213a475b976a3f597c51a94d205 100644 (file)
@@ -284,7 +284,7 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
         } while (progress && !optimize_conservatively);
 
        NIR_PASS(progress, shader, nir_opt_conditional_discard);
-        NIR_PASS(progress, shader, nir_opt_shrink_load);
+        NIR_PASS(progress, shader, nir_opt_shrink_vectors);
         NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
 }