freedreno/ir3: allow copy-propagate out of fanout
[mesa.git] src/freedreno/ir3/ir3_nir.c
index 8d2eef94e57cbd8d8571c98040a99d8b544fd404..99659a7ddef40253e3d671f069e84e189f3c8065 100644
@@ -40,8 +40,7 @@ static const nir_shader_compiler_options options = {
                .lower_flrp32 = true,
                .lower_flrp64 = true,
                .lower_ffract = true,
-               .lower_fmod32 = true,
-               .lower_fmod64 = true,
+               .lower_fmod = true,
                .lower_fdiv = true,
                .lower_isign = true,
                .lower_ldexp = true,
@@ -55,8 +54,10 @@ static const nir_shader_compiler_options options = {
                .lower_helper_invocation = true,
                .lower_bitfield_insert_to_shifts = true,
                .lower_bitfield_extract_to_shifts = true,
-               .lower_bfm = true,
                .use_interpolated_input_intrinsics = true,
+               .lower_rotate = true,
+               .lower_to_scalar = true,
+               .has_imul24 = true,
 };
 
 /* we don't want to lower vertex_id to _zero_based on newer gpus: */
@@ -66,8 +67,7 @@ static const nir_shader_compiler_options options_a6xx = {
                .lower_flrp32 = true,
                .lower_flrp64 = true,
                .lower_ffract = true,
-               .lower_fmod32 = true,
-               .lower_fmod64 = true,
+               .lower_fmod = true,
                .lower_fdiv = true,
                .lower_isign = true,
                .lower_ldexp = true,
@@ -81,8 +81,11 @@ static const nir_shader_compiler_options options_a6xx = {
                .lower_helper_invocation = true,
                .lower_bitfield_insert_to_shifts = true,
                .lower_bitfield_extract_to_shifts = true,
-               .lower_bfm = true,
                .use_interpolated_input_intrinsics = true,
+               .lower_rotate = true,
+               .vectorize_io = true,
+               .lower_to_scalar = true,
+               .has_imul24 = true,
 };
 
 const nir_shader_compiler_options *
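
A note on the options hunks above, not part of the patch itself: NIR core collapsed the per-bit-size lower_fmod32/lower_fmod64 flags into a single lower_fmod, which both option structs now set. With that flag, nir_opt_algebraic rewrites fmod into operations ir3 already handles, essentially a - b * floor(a / b). A minimal C sketch of that arithmetic, purely for illustration:

    #include <math.h>

    /* What a lowered 32-bit fmod boils down to once NIR has rewritten it;
     * floorf stands in for NIR's ffloor. */
    static inline float
    fmod_lowered(float a, float b)
    {
            return a - b * floorf(a / b);
    }

The remaining new flags (lower_rotate, lower_to_scalar, has_imul24, and vectorize_io on a6xx) simply advertise to the shared NIR and state-tracker code which lowerings and optimizations this backend wants.
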
@@ -100,7 +103,8 @@ ir3_key_lowers_nir(const struct ir3_shader_key *key)
        return key->fsaturate_s | key->fsaturate_t | key->fsaturate_r |
                        key->vsaturate_s | key->vsaturate_t | key->vsaturate_r |
                        key->ucp_enables | key->color_two_side |
-                       key->fclamp_color | key->vclamp_color;
+                       key->fclamp_color | key->vclamp_color |
+                       key->has_gs;
 }
 
 #define OPT(nir, pass, ...) ({                             \
@@ -126,7 +130,7 @@ ir3_optimize_loop(nir_shader *s)
                OPT_V(s, nir_lower_vars_to_ssa);
                progress |= OPT(s, nir_opt_copy_prop_vars);
                progress |= OPT(s, nir_opt_dead_write_vars);
-               progress |= OPT(s, nir_lower_alu_to_scalar, NULL);
+               progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
                progress |= OPT(s, nir_lower_phis_to_scalar);
 
                progress |= OPT(s, nir_copy_prop);
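
For reference, the extra NULL above comes from nir_lower_alu_to_scalar growing a filter callback plus a data pointer; ir3 passes NULL for both to keep scalarizing every ALU instruction. The sketch below only shows the general shape of such a callback; the choice of op to skip and the convention that returning true means "lower this instruction" are assumptions here, not something this patch establishes:

    #include "nir.h"

    /* Hypothetical filter: scalarize every ALU op except fdot4. */
    static bool
    keep_dots_vectorized(const nir_instr *instr, const void *data)
    {
            if (instr->type != nir_instr_type_alu)
                    return false;
            return nir_instr_as_alu(instr)->op != nir_op_fdot4;
    }

    /* ... which would then be passed as:
     *   OPT(s, nir_lower_alu_to_scalar, keep_dots_vectorized, NULL);
     */
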
@@ -176,7 +180,7 @@ ir3_optimize_loop(nir_shader *s)
        } while (progress);
 }
 
-struct nir_shader *
+void
 ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
                const struct ir3_shader_key *key)
 {
@@ -185,6 +189,19 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
                        .lower_tg4_offsets = true,
        };
 
+       if (key && key->has_gs) {
+               switch (shader->type) {
+               case MESA_SHADER_VERTEX:
+                       NIR_PASS_V(s, ir3_nir_lower_vs_to_explicit_io, shader);
+                       break;
+               case MESA_SHADER_GEOMETRY:
+                       NIR_PASS_V(s, ir3_nir_lower_gs, shader);
+                       break;
+               default:
+                       break;
+               }
+       }
+
        if (key) {
                switch (shader->type) {
                case MESA_SHADER_FRAGMENT:
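
Context for the new has_gs block above: with a geometry shader in the pipeline, ir3_nir_lower_vs_to_explicit_io rewrites vertex shader outputs as explicit stores that the GS later reads back per input vertex, and ir3_nir_lower_gs lowers the GS emit/cut model. The fragment below is only a conceptual, standalone restatement of that handoff; the ring layout, names and stride handling are invented for illustration and are not the ir3 lowering:

    #include <string.h>

    /* VS side: park each output vec4 at a location derived from the vertex id. */
    static void
    vs_store_output(float *ring, unsigned stride_vec4, unsigned vertex_id,
                    unsigned slot, const float value[4])
    {
            memcpy(&ring[(vertex_id * stride_vec4 + slot) * 4], value,
                   4 * sizeof(float));
    }

    /* GS side: read the same record back for each vertex of the input primitive. */
    static void
    gs_load_input(const float *ring, unsigned stride_vec4, unsigned vertex_id,
                  unsigned slot, float value[4])
    {
            memcpy(value, &ring[(vertex_id * stride_vec4 + slot) * 4],
                   4 * sizeof(float));
    }
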
@@ -222,11 +239,11 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
 
        if (key) {
                if (s->info.stage == MESA_SHADER_VERTEX) {
-                       OPT_V(s, nir_lower_clip_vs, key->ucp_enables, false);
+                       OPT_V(s, nir_lower_clip_vs, key->ucp_enables, false, false, NULL);
                        if (key->vclamp_color)
                                OPT_V(s, nir_lower_clamp_color_outputs);
                } else if (s->info.stage == MESA_SHADER_FRAGMENT) {
-                       OPT_V(s, nir_lower_clip_fs, key->ucp_enables);
+                       OPT_V(s, nir_lower_clip_fs, key->ucp_enables, false);
                        if (key->fclamp_color)
                                OPT_V(s, nir_lower_clamp_color_outputs);
                }
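
The extra arguments to nir_lower_clip_vs and nir_lower_clip_fs correspond to parameters the passes gained upstream; ir3 leaves them at false/NULL to keep its previous behaviour. What the VS-side lowering emits per enabled user clip plane is just a dot product against the clip-space position, which is also why the driver-param scan further down reserves the IR3_DP_UCP* constants. A minimal sketch of that arithmetic:

    /* clip_distance[i] = dot(position, user_clip_plane[i]); the vertex is
     * clipped where the distance goes negative. */
    static float
    user_clip_distance(const float pos[4], const float plane[4])
    {
            return pos[0] * plane[0] + pos[1] * plane[1] +
                   pos[2] * plane[2] + pos[3] * plane[3];
    }
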
@@ -260,13 +277,27 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
         * NOTE that UBO analysis pass should only be done once, before variants
         */
        const bool ubo_progress = !key && OPT(s, ir3_nir_analyze_ubo_ranges, shader);
-       const bool idiv_progress = OPT(s, nir_lower_idiv);
+       const bool idiv_progress = OPT(s, nir_lower_idiv, nir_lower_idiv_fast);
        if (ubo_progress || idiv_progress)
                ir3_optimize_loop(s);
 
+       /* Do late algebraic optimization to turn add(a, neg(b)) back into
+        * subs, then the mandatory cleanup after algebraic.  Note that it may
+        * produce fnegs, and if so then we need to keep running to squash
+        * fneg(fneg(a)).
+        */
+       bool more_late_algebraic = true;
+       while (more_late_algebraic) {
+               more_late_algebraic = OPT(s, nir_opt_algebraic_late);
+               OPT_V(s, nir_opt_constant_folding);
+               OPT_V(s, nir_copy_prop);
+               OPT_V(s, nir_opt_dce);
+               OPT_V(s, nir_opt_cse);
+       }
+
        OPT_V(s, nir_remove_dead_variables, nir_var_function_temp);
 
-       OPT_V(s, nir_move_load_const);
+       OPT_V(s, nir_opt_sink, nir_move_const_undef);
 
        if (ir3_shader_debug & IR3_DBG_DISASM) {
                debug_printf("----------------------\n");
@@ -283,8 +314,6 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
        if (!key) {
                ir3_setup_const_state(shader, s);
        }
-
-       return s;
 }
 
 static void
@@ -315,8 +344,10 @@ ir3_nir_scan_driver_consts(nir_shader *shader,
                                        layout->ssbo_size.count += 1; /* one const per */
                                        break;
                                case nir_intrinsic_image_deref_atomic_add:
-                               case nir_intrinsic_image_deref_atomic_min:
-                               case nir_intrinsic_image_deref_atomic_max:
+                               case nir_intrinsic_image_deref_atomic_imin:
+                               case nir_intrinsic_image_deref_atomic_umin:
+                               case nir_intrinsic_image_deref_atomic_imax:
+                               case nir_intrinsic_image_deref_atomic_umax:
                                case nir_intrinsic_image_deref_atomic_and:
                                case nir_intrinsic_image_deref_atomic_or:
                                case nir_intrinsic_image_deref_atomic_xor:
@@ -332,6 +363,31 @@ ir3_nir_scan_driver_consts(nir_shader *shader,
                                                layout->image_dims.count;
                                        layout->image_dims.count += 3; /* three const per */
                                        break;
+                               case nir_intrinsic_load_ubo:
+                                       if (nir_src_is_const(intr->src[0])) {
+                                               layout->num_ubos = MAX2(layout->num_ubos,
+                                                               nir_src_as_uint(intr->src[0]) + 1);
+                                       } else {
+                                               layout->num_ubos = shader->info.num_ubos;
+                                       }
+                                       break;
+                               case nir_intrinsic_load_base_vertex:
+                               case nir_intrinsic_load_first_vertex:
+                                       layout->num_driver_params =
+                                               MAX2(layout->num_driver_params, IR3_DP_VTXID_BASE + 1);
+                                       break;
+                               case nir_intrinsic_load_user_clip_plane:
+                                       layout->num_driver_params =
+                                               MAX2(layout->num_driver_params, IR3_DP_UCP7_W + 1);
+                                       break;
+                               case nir_intrinsic_load_num_work_groups:
+                                       layout->num_driver_params =
+                                               MAX2(layout->num_driver_params, IR3_DP_NUM_WORK_GROUPS_Z + 1);
+                                       break;
+                               case nir_intrinsic_load_local_group_size:
+                                       layout->num_driver_params =
+                                               MAX2(layout->num_driver_params, IR3_DP_LOCAL_GROUP_SIZE_Z + 1);
+                                       break;
                                default:
                                        break;
                                }
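
The new load_ubo case sizes the UBO upload from what the shader actually does: a constant block index only grows num_ubos far enough to cover that block, while any indirect index falls back to the full declared count. The driver-param cases likewise grow num_driver_params to the highest IR3_DP_* slot that can be referenced, plus one. A standalone restatement of the UBO bookkeeping, with a made-up helper name:

    #include <stdbool.h>

    /* Hypothetical helper mirroring the load_ubo case above. */
    static void
    note_ubo_load(unsigned *num_ubos, bool index_is_const,
                  unsigned const_index, unsigned declared_ubos)
    {
            if (index_is_const) {
                    /* a constant load of block 2 means blocks 0..2, i.e. a count of 3 */
                    if (const_index + 1 > *num_ubos)
                            *num_ubos = const_index + 1;
            } else {
                    /* indirect index: every declared block must stay addressable */
                    *num_ubos = declared_ubos;
            }
    }
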
@@ -350,11 +406,17 @@ ir3_setup_const_state(struct ir3_shader *shader, nir_shader *nir)
 
        ir3_nir_scan_driver_consts(nir, const_state);
 
-       const_state->num_uniforms = nir->num_uniforms;
-       const_state->num_ubos = nir->info.num_ubos;
+       if ((compiler->gpu_id < 500) &&
+                       (shader->stream_output.num_outputs > 0)) {
+               const_state->num_driver_params =
+                       MAX2(const_state->num_driver_params, IR3_DP_VTXCNT_MAX + 1);
+       }
+
+       /* num_driver_params is scalar, align to vec4: */
+       const_state->num_driver_params = align(const_state->num_driver_params, 4);
 
        debug_assert((shader->ubo_state.size % 16) == 0);
-       unsigned constoff = align(shader->ubo_state.size / 16, 4);
+       unsigned constoff = align(shader->ubo_state.size / 16, 8);
        unsigned ptrsz = ir3_pointer_size(compiler);
 
        if (const_state->num_ubos > 0) {
@@ -374,15 +436,9 @@ ir3_setup_const_state(struct ir3_shader *shader, nir_shader *nir)
                constoff += align(cnt, 4) / 4;
        }
 
-       unsigned num_driver_params = 0;
-       if (shader->type == MESA_SHADER_VERTEX) {
-               num_driver_params = IR3_DP_VS_COUNT;
-       } else if (shader->type == MESA_SHADER_COMPUTE) {
-               num_driver_params = IR3_DP_CS_COUNT;
-       }
-
-       const_state->offsets.driver_param = constoff;
-       constoff += align(num_driver_params, 4) / 4;
+       if (const_state->num_driver_params > 0)
+               const_state->offsets.driver_param = constoff;
+       constoff += const_state->num_driver_params / 4;
 
        if ((shader->type == MESA_SHADER_VERTEX) &&
                        (compiler->gpu_id < 500) &&
@@ -391,5 +447,19 @@ ir3_setup_const_state(struct ir3_shader *shader, nir_shader *nir)
                constoff += align(IR3_MAX_SO_BUFFERS * ptrsz, 4) / 4;
        }
 
+       switch (shader->type) {
+       case MESA_SHADER_VERTEX:
+               const_state->offsets.primitive_param = constoff;
+               constoff += 1;
+               break;
+       case MESA_SHADER_GEOMETRY:
+               const_state->offsets.primitive_param = constoff;
+               const_state->offsets.primitive_map = constoff + 1;
+               constoff += 1 + DIV_ROUND_UP(nir->num_inputs, 4);
+               break;
+       default:
+               break;
+       }
+
        const_state->offsets.immediate = constoff;
 }
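
To make the units in ir3_setup_const_state explicit: constoff counts vec4 const slots. UBO range data takes ubo_state.size / 16 slots, now aligned up to 8 rather than 4; driver params are counted in scalars, padded to a vec4 boundary and then divided by 4; a VS or GS reserves one slot for primitive_param, and a GS additionally reserves DIV_ROUND_UP(num_inputs, 4) slots for the primitive map; immediates start at whatever offset remains. The sketch below restates just that arithmetic with plain parameters, skipping the UBO-pointer, SSBO-size, image-dims and tfbo blocks that sit in between in the real function:

    #include <stdbool.h>

    #define EX_ALIGN(v, a)        (((v) + (a) - 1) / (a) * (a))
    #define EX_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Worked restatement of the offset math; everything is in vec4 slots. */
    static unsigned
    const_layout_example(unsigned ubo_state_size_bytes,
                         unsigned num_driver_params,  /* in scalars */
                         bool is_gs, unsigned gs_num_inputs)
    {
            unsigned constoff = EX_ALIGN(ubo_state_size_bytes / 16, 8);

            num_driver_params = EX_ALIGN(num_driver_params, 4);
            constoff += num_driver_params / 4;

            /* assumes a VS or GS; FS/CS skip the primitive consts */
            constoff += 1;                                         /* primitive_param */
            if (is_gs)
                    constoff += EX_DIV_ROUND_UP(gs_num_inputs, 4); /* primitive_map */

            return constoff;                                       /* offsets.immediate */
    }

For example, 208 bytes of UBO range data is 13 vec4 slots, rounded up to 16, and a GS with six inputs adds 1 + 2 = 3 slots for the primitive consts.
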