nir/lower_idiv: add new llvm-based path
[mesa.git] src/freedreno/ir3/ir3_nir.c
index 804196f63e9255baba7708f2bb800a49ff843291..99659a7ddef40253e3d671f069e84e189f3c8065 100644
 
 
 #include "util/debug.h"
+#include "util/u_math.h"
 
 #include "ir3_nir.h"
 #include "ir3_compiler.h"
 #include "ir3_shader.h"
 
+static void ir3_setup_const_state(struct ir3_shader *shader, nir_shader *nir);
+
 static const nir_shader_compiler_options options = {
                .lower_fpow = true,
                .lower_scmp = true,
                .lower_flrp32 = true,
                .lower_flrp64 = true,
                .lower_ffract = true,
-               .lower_fmod32 = true,
-               .lower_fmod64 = true,
+               .lower_fmod = true,
                .lower_fdiv = true,
                .lower_isign = true,
                .lower_ldexp = true,
@@ -52,8 +54,10 @@ static const nir_shader_compiler_options options = {
                .lower_helper_invocation = true,
                .lower_bitfield_insert_to_shifts = true,
                .lower_bitfield_extract_to_shifts = true,
-               .lower_bfm = true,
                .use_interpolated_input_intrinsics = true,
+               .lower_rotate = true,
+               .lower_to_scalar = true,
+               .has_imul24 = true,
 };
 
 /* we don't want to lower vertex_id to _zero_based on newer gpus: */
@@ -63,8 +67,7 @@ static const nir_shader_compiler_options options_a6xx = {
                .lower_flrp32 = true,
                .lower_flrp64 = true,
                .lower_ffract = true,
-               .lower_fmod32 = true,
-               .lower_fmod64 = true,
+               .lower_fmod = true,
                .lower_fdiv = true,
                .lower_isign = true,
                .lower_ldexp = true,
@@ -78,8 +81,11 @@ static const nir_shader_compiler_options options_a6xx = {
                .lower_helper_invocation = true,
                .lower_bitfield_insert_to_shifts = true,
                .lower_bitfield_extract_to_shifts = true,
-               .lower_bfm = true,
                .use_interpolated_input_intrinsics = true,
+               .lower_rotate = true,
+               .vectorize_io = true,
+               .lower_to_scalar = true,
+               .has_imul24 = true,
 };
 
 const nir_shader_compiler_options *
@@ -97,7 +103,8 @@ ir3_key_lowers_nir(const struct ir3_shader_key *key)
        return key->fsaturate_s | key->fsaturate_t | key->fsaturate_r |
                        key->vsaturate_s | key->vsaturate_t | key->vsaturate_r |
                        key->ucp_enables | key->color_two_side |
-                       key->fclamp_color | key->vclamp_color;
+                       key->fclamp_color | key->vclamp_color |
+                       key->has_gs;
 }
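
ir3_key_lowers_nir() now also treats GS-linked keys (key->has_gs) as ones that change the NIR, so the geometry-related lowering added further down operates on a per-variant copy. A hypothetical caller-side sketch, not part of the patch (the clone step and variable names are illustrative):

/* Hypothetical variant-creation sketch: clone the shader's NIR only when
 * the key forces additional lowering, which now includes key->has_gs. */
nir_shader *s = ir3_key_lowers_nir(&key)
        ? nir_shader_clone(NULL, shader->nir)
        : shader->nir;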
 
 #define OPT(nir, pass, ...) ({                             \
@@ -123,7 +130,7 @@ ir3_optimize_loop(nir_shader *s)
                OPT_V(s, nir_lower_vars_to_ssa);
                progress |= OPT(s, nir_opt_copy_prop_vars);
                progress |= OPT(s, nir_opt_dead_write_vars);
-               progress |= OPT(s, nir_lower_alu_to_scalar);
+               progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
                progress |= OPT(s, nir_lower_phis_to_scalar);
 
                progress |= OPT(s, nir_copy_prop);
@@ -173,7 +180,7 @@ ir3_optimize_loop(nir_shader *s)
        } while (progress);
 }
 
-struct nir_shader *
+void
 ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
                const struct ir3_shader_key *key)
 {
@@ -182,6 +189,19 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
                        .lower_tg4_offsets = true,
        };
 
+       if (key && key->has_gs) {
+               switch (shader->type) {
+               case MESA_SHADER_VERTEX:
+                       NIR_PASS_V(s, ir3_nir_lower_vs_to_explicit_io, shader);
+                       break;
+               case MESA_SHADER_GEOMETRY:
+                       NIR_PASS_V(s, ir3_nir_lower_gs, shader);
+                       break;
+               default:
+                       break;
+               }
+       }
+
        if (key) {
                switch (shader->type) {
                case MESA_SHADER_FRAGMENT:
@@ -219,11 +239,11 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
 
        if (key) {
                if (s->info.stage == MESA_SHADER_VERTEX) {
-                       OPT_V(s, nir_lower_clip_vs, key->ucp_enables, false);
+                       OPT_V(s, nir_lower_clip_vs, key->ucp_enables, false, false, NULL);
                        if (key->vclamp_color)
                                OPT_V(s, nir_lower_clamp_color_outputs);
                } else if (s->info.stage == MESA_SHADER_FRAGMENT) {
-                       OPT_V(s, nir_lower_clip_fs, key->ucp_enables);
+                       OPT_V(s, nir_lower_clip_fs, key->ucp_enables, false);
                        if (key->fclamp_color)
                                OPT_V(s, nir_lower_clamp_color_outputs);
                }
@@ -257,13 +277,27 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
         * NOTE that UBO analysis pass should only be done once, before variants
         */
        const bool ubo_progress = !key && OPT(s, ir3_nir_analyze_ubo_ranges, shader);
-       const bool idiv_progress = OPT(s, nir_lower_idiv);
+       const bool idiv_progress = OPT(s, nir_lower_idiv, nir_lower_idiv_fast);
        if (ubo_progress || idiv_progress)
                ir3_optimize_loop(s);
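
The commit subject refers to nir_lower_idiv() growing a path selector; ir3 passes nir_lower_idiv_fast here to keep the approximate, float-reciprocal style lowering it already relied on rather than the new precise LLVM-derived path. Below is a standalone sketch of the general idea behind that fast path; it is not part of the patch, is not the actual NIR sequence, and is only dependable when both operands fit in a float's 24-bit mantissa:

/* Illustrative only: unsigned division approximated with a float
 * reciprocal plus a single fix-up step. */
#include <stdint.h>
#include <stdio.h>

static uint32_t
udiv_fast_approx(uint32_t n, uint32_t d)
{
        float rcp = 1.0f / (float)d;              /* stands in for a hardware rcp */
        uint32_t q = (uint32_t)((float)n * rcp);  /* truncated estimate */

        /* The estimate can be off by one in either direction; correct it. */
        if (q * d > n)
                q--;
        else if ((q + 1) * d <= n)
                q++;

        return q;
}

int main(void)
{
        printf("%u\n", udiv_fast_approx(1000000u, 7u));   /* prints 142857 */
        return 0;
}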
 
+       /* Do late algebraic optimization to turn add(a, neg(b)) back into
+        * subs, then the mandatory cleanup after algebraic.  Note that it may
+        * produce fnegs, and if so then we need to keep running to squash
+        * fneg(fneg(a)).
+        */
+       bool more_late_algebraic = true;
+       while (more_late_algebraic) {
+               more_late_algebraic = OPT(s, nir_opt_algebraic_late);
+               OPT_V(s, nir_opt_constant_folding);
+               OPT_V(s, nir_copy_prop);
+               OPT_V(s, nir_opt_dce);
+               OPT_V(s, nir_opt_cse);
+       }
+
        OPT_V(s, nir_remove_dead_variables, nir_var_function_temp);
 
-       OPT_V(s, nir_move_load_const);
+       OPT_V(s, nir_opt_sink, nir_move_const_undef);
 
        if (ir3_shader_debug & IR3_DBG_DISASM) {
                debug_printf("----------------------\n");
@@ -273,10 +307,16 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
 
        nir_sweep(s);
 
-       return s;
+       /* The first time through, when not creating a variant, do the one-time
+        * const_state layout setup.  This should be done after UBO range
+        * analysis.
+        */
+       if (!key) {
+               ir3_setup_const_state(shader, s);
+       }
 }
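
Since ir3_optimize_nir() now works on the NIR in place and returns void, call sites drop the reassignment. A minimal illustrative call site, not part of the patch:

/* Hypothetical call site: the nir_shader is optimized in place now. */
ir3_optimize_nir(shader, nir, key);   /* previously: nir = ir3_optimize_nir(shader, nir, key); */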
 
-void
+static void
 ir3_nir_scan_driver_consts(nir_shader *shader,
                struct ir3_const_state *layout)
 {
@@ -304,8 +344,10 @@ ir3_nir_scan_driver_consts(nir_shader *shader,
                                        layout->ssbo_size.count += 1; /* one const per */
                                        break;
                                case nir_intrinsic_image_deref_atomic_add:
-                               case nir_intrinsic_image_deref_atomic_min:
-                               case nir_intrinsic_image_deref_atomic_max:
+                               case nir_intrinsic_image_deref_atomic_imin:
+                               case nir_intrinsic_image_deref_atomic_umin:
+                               case nir_intrinsic_image_deref_atomic_imax:
+                               case nir_intrinsic_image_deref_atomic_umax:
                                case nir_intrinsic_image_deref_atomic_and:
                                case nir_intrinsic_image_deref_atomic_or:
                                case nir_intrinsic_image_deref_atomic_xor:
@@ -321,6 +363,31 @@ ir3_nir_scan_driver_consts(nir_shader *shader,
                                                layout->image_dims.count;
                                        layout->image_dims.count += 3; /* three const per */
                                        break;
+                               case nir_intrinsic_load_ubo:
+                                       if (nir_src_is_const(intr->src[0])) {
+                                               layout->num_ubos = MAX2(layout->num_ubos,
+                                                               nir_src_as_uint(intr->src[0]) + 1);
+                                       } else {
+                                               layout->num_ubos = shader->info.num_ubos;
+                                       }
+                                       break;
+                               case nir_intrinsic_load_base_vertex:
+                               case nir_intrinsic_load_first_vertex:
+                                       layout->num_driver_params =
+                                               MAX2(layout->num_driver_params, IR3_DP_VTXID_BASE + 1);
+                                       break;
+                               case nir_intrinsic_load_user_clip_plane:
+                                       layout->num_driver_params =
+                                               MAX2(layout->num_driver_params, IR3_DP_UCP7_W + 1);
+                                       break;
+                               case nir_intrinsic_load_num_work_groups:
+                                       layout->num_driver_params =
+                                               MAX2(layout->num_driver_params, IR3_DP_NUM_WORK_GROUPS_Z + 1);
+                                       break;
+                               case nir_intrinsic_load_local_group_size:
+                                       layout->num_driver_params =
+                                               MAX2(layout->num_driver_params, IR3_DP_LOCAL_GROUP_SIZE_Z + 1);
+                                       break;
                                default:
                                        break;
                                }
@@ -328,3 +395,71 @@ ir3_nir_scan_driver_consts(nir_shader *shader,
                }
        }
 }
+
+static void
+ir3_setup_const_state(struct ir3_shader *shader, nir_shader *nir)
+{
+       struct ir3_compiler *compiler = shader->compiler;
+       struct ir3_const_state *const_state = &shader->const_state;
+
+       memset(&const_state->offsets, ~0, sizeof(const_state->offsets));
+
+       ir3_nir_scan_driver_consts(nir, const_state);
+
+       if ((compiler->gpu_id < 500) &&
+                       (shader->stream_output.num_outputs > 0)) {
+               const_state->num_driver_params =
+                       MAX2(const_state->num_driver_params, IR3_DP_VTXCNT_MAX + 1);
+       }
+
+       /* num_driver_params is scalar, align to vec4: */
+       const_state->num_driver_params = align(const_state->num_driver_params, 4);
+
+       debug_assert((shader->ubo_state.size % 16) == 0);
+       unsigned constoff = align(shader->ubo_state.size / 16, 8);
+       unsigned ptrsz = ir3_pointer_size(compiler);
+
+       if (const_state->num_ubos > 0) {
+               const_state->offsets.ubo = constoff;
+               constoff += align(nir->info.num_ubos * ptrsz, 4) / 4;
+       }
+
+       if (const_state->ssbo_size.count > 0) {
+               unsigned cnt = const_state->ssbo_size.count;
+               const_state->offsets.ssbo_sizes = constoff;
+               constoff += align(cnt, 4) / 4;
+       }
+
+       if (const_state->image_dims.count > 0) {
+               unsigned cnt = const_state->image_dims.count;
+               const_state->offsets.image_dims = constoff;
+               constoff += align(cnt, 4) / 4;
+       }
+
+       if (const_state->num_driver_params > 0)
+               const_state->offsets.driver_param = constoff;
+       constoff += const_state->num_driver_params / 4;
+
+       if ((shader->type == MESA_SHADER_VERTEX) &&
+                       (compiler->gpu_id < 500) &&
+                       shader->stream_output.num_outputs > 0) {
+               const_state->offsets.tfbo = constoff;
+               constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
+       }
+
+       switch (shader->type) {
+       case MESA_SHADER_VERTEX:
+               const_state->offsets.primitive_param = constoff;
+               constoff += 1;
+               break;
+       case MESA_SHADER_GEOMETRY:
+               const_state->offsets.primitive_param = constoff;
+               const_state->offsets.primitive_map = constoff + 1;
+               constoff += 1 + DIV_ROUND_UP(nir->num_inputs, 4);
+               break;
+       default:
+               break;
+       }
+
+       const_state->offsets.immediate = constoff;
+}
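
All of the constoff bookkeeping in ir3_setup_const_state() is in vec4 (16-byte) units. The following self-contained worked example walks the same arithmetic, with align() re-implemented locally and every count invented purely for illustration; it is not part of the patch:

#include <stdio.h>

#define ALIGN(v, a) (((v) + (a) - 1) / (a) * (a))

int main(void)
{
        unsigned ubo_state_size    = 64;  /* bytes of lowered UBO data (hypothetical) */
        unsigned num_ubos          = 2;   /* hypothetical */
        unsigned num_driver_params = 5;   /* scalar count before vec4 alignment */
        unsigned ptrsz             = 2;   /* dwords per UBO pointer (hypothetical) */

        num_driver_params = ALIGN(num_driver_params, 4);    /* 5 -> 8 scalars */

        unsigned constoff = ALIGN(ubo_state_size / 16, 8);  /* 64/16 = 4 -> 8 vec4 */

        unsigned ubo_off = constoff;                         /* 8 */
        constoff += ALIGN(num_ubos * ptrsz, 4) / 4;          /* +1 -> 9 */

        unsigned driver_param_off = constoff;                /* 9 */
        constoff += num_driver_params / 4;                   /* +2 -> 11 */

        unsigned primitive_param_off = constoff;             /* 11, vertex-shader case */
        constoff += 1;

        unsigned immediate_off = constoff;                   /* 12 */

        printf("ubo=%u driver_param=%u primitive_param=%u immediate=%u\n",
               ubo_off, driver_param_off, primitive_param_off, immediate_off);
        return 0;
}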