vc4: do not report alpha-test as supported
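
vc4 used to run nir_lower_alpha_test on its fragment shaders itself. This change drops that lowering from vc4_program.c; presumably the screen side now returns 0 for PIPE_CAP_ALPHA_TEST so the state tracker lowers the alpha test to a conditional discard before the NIR reaches the driver. Note that this blobdiff spans a range of upstream history, so the hunks below also pick up unrelated NIR interface updates (the nir_op_mov unification, nir_lower_flrp moving into the optimization loop, new lowering flags, and updated pass signatures).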
diff --git a/src/gallium/drivers/vc4/vc4_program.c b/src/gallium/drivers/vc4/vc4_program.c
index 7f6809c087cc996776bd8fe806f66feaea3ecaa2..fc1488619892bf6bd66996ac57f6bc0be86dbea0 100644
--- a/src/gallium/drivers/vc4/vc4_program.c
+++ b/src/gallium/drivers/vc4/vc4_program.c
@@ -1128,8 +1128,7 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
         struct qreg result;
 
         switch (instr->op) {
-        case nir_op_fmov:
-        case nir_op_imov:
+        case nir_op_mov:
                 result = qir_MOV(c, src[0]);
                 break;
         case nir_op_fmul:
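
NIR merged nir_op_fmov and nir_op_imov into a single typeless nir_op_mov: a register-to-register move is bit-preserving regardless of whether it carries float or integer data, so the backend only needs the one case.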
@@ -1442,11 +1441,6 @@ emit_point_size_write(struct vc4_compile *c)
         else
                 point_size = qir_uniform_f(c, 1.0);
 
-        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
-         * BCM21553).
-         */
-        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));
-
         qir_VPM_WRITE(c, point_size);
 }
 
@@ -1527,12 +1521,16 @@ static void
 vc4_optimize_nir(struct nir_shader *s)
 {
         bool progress;
+        unsigned lower_flrp =
+                (s->options->lower_flrp16 ? 16 : 0) |
+                (s->options->lower_flrp32 ? 32 : 0) |
+                (s->options->lower_flrp64 ? 64 : 0);
 
         do {
                 progress = false;
 
                 NIR_PASS_V(s, nir_lower_vars_to_ssa);
-                NIR_PASS(progress, s, nir_lower_alu_to_scalar);
+                NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
                 NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                 NIR_PASS(progress, s, nir_copy_prop);
                 NIR_PASS(progress, s, nir_opt_remove_phis);
@@ -1542,6 +1540,24 @@ vc4_optimize_nir(struct nir_shader *s)
                 NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                 NIR_PASS(progress, s, nir_opt_algebraic);
                 NIR_PASS(progress, s, nir_opt_constant_folding);
+                if (lower_flrp != 0) {
+                        bool lower_flrp_progress = false;
+
+                        NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
+                                 lower_flrp,
+                                 false /* always_precise */,
+                                 s->options->lower_ffma);
+                        if (lower_flrp_progress) {
+                                NIR_PASS(progress, s, nir_opt_constant_folding);
+                                progress = true;
+                        }
+
+                        /* Nothing should rematerialize any flrps, so we only
+                         * need to do this lowering once.
+                         */
+                        lower_flrp = 0;
+                }
+
                 NIR_PASS(progress, s, nir_opt_undef);
                 NIR_PASS(progress, s, nir_opt_loop_unroll,
                          nir_var_shader_in |
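
nir_lower_flrp now runs inside the optimization loop instead of being table-lowered, gated on the per-bit-size lower_flrp16/32/64 options, and only needs a single trip since nothing downstream rematerializes flrps. For reference, a minimal sketch of the identity the pass implements (illustration only, not the pass's actual code):

        #include <assert.h>

        /* flrp(a, b, t) linearly interpolates from a to b.  Lowered forms
         * are a * (1 - t) + b * t, or fma(t, b - a, a) where a fused
         * multiply-add is available. */
        static float flrp_expanded(float a, float b, float t)
        {
                return a * (1.0f - t) + b * t;
        }

        int main(void)
        {
                assert(flrp_expanded(0.0f, 10.0f, 0.25f) == 2.5f);
                return 0;
        }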
@@ -1790,11 +1806,6 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
                                            0));
                 break;
 
-        case nir_intrinsic_load_alpha_ref_float:
-                ntq_store_dest(c, &instr->dest, 0,
-                               qir_uniform(c, QUNIFORM_ALPHA_REF, 0));
-                break;
-
         case nir_intrinsic_load_sample_mask_in:
                 ntq_store_dest(c, &instr->dest, 0,
                                qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
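
Since the driver no longer lowers the alpha test itself, no vc4 fragment shader can contain load_alpha_ref_float anymore, and the QUNIFORM_ALPHA_REF plumbing behind it is dead code.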
@@ -2168,12 +2179,14 @@ static const nir_shader_compiler_options nir_options = {
         .lower_fdiv = true,
         .lower_ffma = true,
         .lower_flrp32 = true,
+        .lower_fmod = true,
         .lower_fpow = true,
         .lower_fsat = true,
         .lower_fsqrt = true,
         .lower_ldexp = true,
         .lower_negate = true,
-        .native_integers = true,
+        .lower_rotate = true,
+        .lower_to_scalar = true,
         .max_unroll_iterations = 32,
 };
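
The options table tracks core NIR churn: native_integers was dropped from nir_shader_compiler_options once all drivers became native-integer, while lower_fmod, lower_rotate, and lower_to_scalar are newer flags that ask shared NIR passes to handle those lowerings instead of driver code.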
 
@@ -2235,15 +2248,8 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
 
         c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);
 
-        if (stage == QSTAGE_FRAG) {
-                if (c->fs_key->alpha_test_func != COMPARE_FUNC_ALWAYS) {
-                        NIR_PASS_V(c->s, nir_lower_alpha_test,
-                                   c->fs_key->alpha_test_func,
-                                   c->fs_key->sample_alpha_to_one &&
-                                   c->fs_key->msaa);
-                }
+        if (stage == QSTAGE_FRAG)
                 NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
-        }
 
         struct nir_lower_tex_options tex_options = {
                 /* We would need to implement txs, but we don't want the
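
This hunk is what the commit subject refers to: the fragment-shader path stops running nir_lower_alpha_test based on alpha_test_func from the FS key, leaving only the blend lowering, so the block collapses to a single-statement if.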
@@ -2294,10 +2300,11 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
 
         if (c->key->ucp_enables) {
                 if (stage == QSTAGE_FRAG) {
-                        NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
+                        NIR_PASS_V(c->s, nir_lower_clip_fs,
+                                   c->key->ucp_enables, false);
                 } else {
                         NIR_PASS_V(c->s, nir_lower_clip_vs,
-                                  c->key->ucp_enables, false);
+                                   c->key->ucp_enables, false, false, NULL);
                         NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                                    nir_var_shader_out);
                 }
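
nir_lower_clip_fs and nir_lower_clip_vs grew parameters in this range: both take a use_clipdist_array flag, and the VS variant additionally takes clip-plane state tokens; vc4 passes false and NULL to keep its previous behavior.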
@@ -2318,6 +2325,21 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
 
         vc4_optimize_nir(c->s);
 
+        /* Do late algebraic optimization to turn add(a, neg(b)) back into
+         * subs, then the mandatory cleanup after algebraic.  Note that it may
+         * produce fnegs, and if so then we need to keep running to squash
+         * fneg(fneg(a)).
+         */
+        bool more_late_algebraic = true;
+        while (more_late_algebraic) {
+                more_late_algebraic = false;
+                NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
+                NIR_PASS_V(c->s, nir_opt_constant_folding);
+                NIR_PASS_V(c->s, nir_copy_prop);
+                NIR_PASS_V(c->s, nir_opt_dce);
+                NIR_PASS_V(c->s, nir_opt_cse);
+        }
+
         NIR_PASS_V(c->s, nir_lower_bool_to_int32);
 
         NIR_PASS_V(c->s, nir_convert_from_ssa, true);
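
Only nir_opt_algebraic_late feeds the loop condition (via NIR_PASS writing into more_late_algebraic); the cleanup passes run unconditionally with NIR_PASS_V. The iteration matters because, as the comment above notes, the late algebraic rules can themselves emit new fnegs, and a subsequent trip is needed to squash fneg(fneg(a)) back down to a.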
@@ -2434,6 +2456,9 @@ vc4_shader_state_create(struct pipe_context *pctx,
                 s = tgsi_to_nir(cso->tokens, pctx->screen);
         }
 
+        if (s->info.stage == MESA_SHADER_VERTEX)
+                NIR_PASS_V(s, nir_lower_point_size, 1.0f, 0.0f);
+
         NIR_PASS_V(s, nir_lower_io, nir_var_all, type_size,
                    (nir_lower_io_options)0);
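
nir_lower_point_size clamps the vertex shader's point-size output to the given range, with a bound of 0.0f disabling that side, so this call enforces a minimum of 1.0 with no upper limit. A minimal sketch of the clamp semantics assumed above (the helper name is hypothetical):

        #include <stdio.h>

        /* Mirrors the (min, max) convention of the call above, where a
         * bound of 0.0f means "no bound" -- an assumption drawn from the
         * (1.0f, 0.0f) arguments, not from the pass's source. */
        static float clamp_point_size(float psiz, float min, float max)
        {
                if (min > 0.0f && psiz < min)
                        psiz = min;
                if (max > 0.0f && psiz > max)
                        psiz = max;
                return psiz;
        }

        int main(void)
        {
                printf("%.3f\n", clamp_point_size(0.125f, 1.0f, 0.0f));
                return 0;
        }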