nir: allow specifying a set of opcodes in lower_alu_to_scalar
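
The ir3 side of this change is mechanical: nir_lower_alu_to_scalar() grows an
extra argument selecting which ALU opcodes get scalarized, and passing NULL
keeps the old lower-everything behaviour, so the call in ir3_optimize_loop()
below only gains a NULL.  A minimal usage sketch of the new parameter follows;
the bitset type and the opcode picked here are illustrative assumptions, not
part of this patch:

    /* Hypothetical caller that only wants fdot4 scalarized
     * (assuming the new argument is a bitset indexed by nir_op): */
    BITSET_DECLARE(scalar_ops, nir_num_opcodes) = { 0 };
    BITSET_SET(scalar_ops, nir_op_fdot4);
    nir_lower_alu_to_scalar(shader, scalar_ops);

    /* NULL == scalarize every vector ALU op, i.e. the previous behaviour: */
    nir_lower_alu_to_scalar(shader, NULL);
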
diff --git a/src/freedreno/ir3/ir3_nir.c b/src/freedreno/ir3/ir3_nir.c
index 3bc22fe5be5917be2741f237fb11386f01b30b07..8d2eef94e57cbd8d8571c98040a99d8b544fd404 100644
--- a/src/freedreno/ir3/ir3_nir.c
+++ b/src/freedreno/ir3/ir3_nir.c
 
 
 #include "util/debug.h"
+#include "util/u_math.h"
 
 #include "ir3_nir.h"
 #include "ir3_compiler.h"
 #include "ir3_shader.h"
 
+static void ir3_setup_const_state(struct ir3_shader *shader, nir_shader *nir);
+
 static const nir_shader_compiler_options options = {
                .lower_fpow = true,
                .lower_scmp = true,
@@ -112,13 +115,18 @@ static void
 ir3_optimize_loop(nir_shader *s)
 {
        bool progress;
+       unsigned lower_flrp =
+               (s->options->lower_flrp16 ? 16 : 0) |
+               (s->options->lower_flrp32 ? 32 : 0) |
+               (s->options->lower_flrp64 ? 64 : 0);
+
        do {
                progress = false;
 
                OPT_V(s, nir_lower_vars_to_ssa);
                progress |= OPT(s, nir_opt_copy_prop_vars);
                progress |= OPT(s, nir_opt_dead_write_vars);
-               progress |= OPT(s, nir_lower_alu_to_scalar);
+               progress |= OPT(s, nir_lower_alu_to_scalar, NULL);
                progress |= OPT(s, nir_lower_phis_to_scalar);
 
                progress |= OPT(s, nir_copy_prop);
@@ -135,6 +143,22 @@ ir3_optimize_loop(nir_shader *s)
                progress |= OPT(s, nir_opt_intrinsics);
                progress |= OPT(s, nir_opt_algebraic);
                progress |= OPT(s, nir_opt_constant_folding);
+
+               if (lower_flrp != 0) {
+                       if (OPT(s, nir_lower_flrp,
+                                       lower_flrp,
+                                       false /* always_precise */,
+                                       s->options->lower_ffma)) {
+                               OPT(s, nir_opt_constant_folding);
+                               progress = true;
+                       }
+
+                       /* Nothing should rematerialize any flrps, so we only
+                        * need to do this lowering once.
+                        */
+                       lower_flrp = 0;
+               }
+
                progress |= OPT(s, nir_opt_dead_cf);
                if (OPT(s, nir_opt_trivial_continues)) {
                        progress |= true;
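
For reference on the flrp block added above: lower_flrp is a per-bit-size mask
(16/32/64), and the pass replaces flrp instructions with their definition,
with the driver's lower_ffma option forwarded so the pass knows what the
backend can handle.  A reference of the identity involved (an illustration
only, assuming the standard flrp definition rather than the exact NIR
expansion):

    /* flrp(a, b, t) = a * (1.0 - t) + b * t; one possible fused form is
     * ffma(b, t, ffma(-a, t, a)), which is why it matters to the pass
     * whether ffma itself is going to be lowered. */
    static float flrp_ref(float a, float b, float t)
    {
        return a * (1.0f - t) + b * t;
    }
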
@@ -252,12 +276,20 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
 
        nir_sweep(s);
 
+       /* The first time through, when not creating a variant, do the
+        * one-time const_state layout setup.  This should be done after
+        * UBO range analysis.
+        */
+       if (!key) {
+               ir3_setup_const_state(shader, s);
+       }
+
        return s;
 }
 
-void
+static void
 ir3_nir_scan_driver_consts(nir_shader *shader,
-               struct ir3_driver_const_layout *layout)
+               struct ir3_const_state *layout)
 {
        nir_foreach_function(function, shader) {
                if (!function->impl)
@@ -307,3 +339,57 @@ ir3_nir_scan_driver_consts(nir_shader *shader,
                }
        }
 }
+
+static void
+ir3_setup_const_state(struct ir3_shader *shader, nir_shader *nir)
+{
+       struct ir3_compiler *compiler = shader->compiler;
+       struct ir3_const_state *const_state = &shader->const_state;
+
+       memset(&const_state->offsets, ~0, sizeof(const_state->offsets));
+
+       ir3_nir_scan_driver_consts(nir, const_state);
+
+       const_state->num_uniforms = nir->num_uniforms;
+       const_state->num_ubos = nir->info.num_ubos;
+
+       debug_assert((shader->ubo_state.size % 16) == 0);
+       unsigned constoff = align(shader->ubo_state.size / 16, 4);
+       unsigned ptrsz = ir3_pointer_size(compiler);
+
+       if (const_state->num_ubos > 0) {
+               const_state->offsets.ubo = constoff;
+               constoff += align(nir->info.num_ubos * ptrsz, 4) / 4;
+       }
+
+       if (const_state->ssbo_size.count > 0) {
+               unsigned cnt = const_state->ssbo_size.count;
+               const_state->offsets.ssbo_sizes = constoff;
+               constoff += align(cnt, 4) / 4;
+       }
+
+       if (const_state->image_dims.count > 0) {
+               unsigned cnt = const_state->image_dims.count;
+               const_state->offsets.image_dims = constoff;
+               constoff += align(cnt, 4) / 4;
+       }
+
+       unsigned num_driver_params = 0;
+       if (shader->type == MESA_SHADER_VERTEX) {
+               num_driver_params = IR3_DP_VS_COUNT;
+       } else if (shader->type == MESA_SHADER_COMPUTE) {
+               num_driver_params = IR3_DP_CS_COUNT;
+       }
+
+       const_state->offsets.driver_param = constoff;
+       constoff += align(num_driver_params, 4) / 4;
+
+       if ((shader->type == MESA_SHADER_VERTEX) &&
+                       (compiler->gpu_id < 500) &&
+                       shader->stream_output.num_outputs > 0) {
+               const_state->offsets.tfbo = constoff;
+               constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
+       }
+
+       const_state->offsets.immediate = constoff;
+}
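
As a reading aid for the new ir3_setup_const_state(): constoff counts vec4
const slots (ubo_state.size appears to be in bytes, per the %16 assert and
the /16), and each align(x, 4) / 4 rounds a dword count up to whole vec4
slots.  A worked layout with hypothetical counts, not taken from the patch:

    /* ubo_state.size = 64 bytes        -> constoff = align(64/16, 4) = 4
     * 2 UBO pointers, ptrsz = 2 dwords -> align(2*2, 4)/4 = 1, ubo offset 4, constoff = 5
     * 3 SSBO sizes                     -> align(3, 4)/4   = 1, ssbo_sizes offset 5, constoff = 6
     * driver params, streamout and immediates follow in the same units;
     * anything with a zero count keeps its ~0 offset from the memset. */
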