freedreno: Add layout_resource_for_modifier screen vfunc
diff --git a/src/gallium/drivers/vc4/vc4_program.c b/src/gallium/drivers/vc4/vc4_program.c
index e5f7aa31b0ebbffe1c6500ded626f4bceaf151a1..0a6c43f0462618f8bbef7ffec03a9b7a13ef038b 100644
--- a/src/gallium/drivers/vc4/vc4_program.c
+++ b/src/gallium/drivers/vc4/vc4_program.c
@@ -23,7 +23,7 @@
  */
 
 #include <inttypes.h>
-#include "util/u_format.h"
+#include "util/format/u_format.h"
 #include "util/crc32.h"
 #include "util/u_math.h"
 #include "util/u_memory.h"
@@ -185,7 +185,7 @@ ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
                struct qreg result)
 {
         struct qinst *last_inst = NULL;
-        if (!list_empty(&c->cur_block->instructions))
+        if (!list_is_empty(&c->cur_block->instructions))
                 last_inst = (struct qinst *)c->cur_block->instructions.prev;
 
         assert(result.file == QFILE_UNIF ||
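
This hunk and the matching one below are part of the tree-wide rename
of list_empty() to list_is_empty(); behavior is unchanged. util/list.h
implements a circular, sentinel-headed doubly-linked list, so both the
emptiness test and the instructions.prev access for the last emitted
instruction are O(1). A minimal paraphrase of the relevant semantics
(a sketch, not the verbatim header):

    struct list_head {
            struct list_head *prev;
            struct list_head *next;
    };

    /* An empty sentinel-headed circular list points back at itself;
     * when non-empty, head->prev is the tail element. */
    static inline bool
    list_is_empty(const struct list_head *list)
    {
            return list->next == list;
    }
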
@@ -832,7 +832,7 @@ ntq_src_is_only_ssa_def_user(nir_src *src)
         if (!src->is_ssa)
                 return false;
 
-        if (!list_empty(&src->ssa->if_uses))
+        if (!list_is_empty(&src->ssa->if_uses))
                 return false;
 
         return (src->ssa->uses.next == &src->use_link &&
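
The same rename applies here. ntq_src_is_only_ssa_def_user() checks
that an SSA def has exactly one user: no if-condition uses, and a uses
list whose sole node is this src's own use_link. On a sentinel-headed
circular list, "exactly one element" means the head's next is that
node and the node's next is the head, which is what the return
expression tests; util/list.h has a generic helper in the same spirit
(paraphrased):

    /* True when a sentinel-headed circular list holds exactly one
     * element. */
    static inline bool
    list_is_singular(const struct list_head *head)
    {
            return head->next != head && head->next->next == head;
    }
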
@@ -2191,6 +2191,7 @@ static const nir_shader_compiler_options nir_options = {
         .lower_ldexp = true,
         .lower_negate = true,
         .lower_rotate = true,
+        .lower_to_scalar = true,
         .max_unroll_iterations = 32,
 };
 
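
Setting .lower_to_scalar asks shared NIR code to scalarize vector ALU
operations before the backend sees them, a good fit for vc4's QIR,
which operates on one scalar channel at a time. A hedged sketch of how
a caller typically honors the flag (the nir_lower_alu_to_scalar
signature has varied across mesa branches; this assumes the
filter-callback form, and "options" stands in for the backend's
nir_shader_compiler_options):

    if (options->lower_to_scalar)
            NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
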
@@ -2257,7 +2258,8 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
                         NIR_PASS_V(c->s, nir_lower_alpha_test,
                                    c->fs_key->alpha_test_func,
                                    c->fs_key->sample_alpha_to_one &&
-                                   c->fs_key->msaa);
+                                   c->fs_key->msaa,
+                                   NULL);
                 }
                 NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
         }
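
nir_lower_alpha_test() grew a trailing state-token parameter that lets
the alpha reference value come from driver state rather than a
baked-in constant; passing NULL keeps the previous constant-reference
behavior. Conceptually the pass rewrites the fragment shader to kill
fragments that fail the keyed comparison; a host-side sketch of that
predicate (illustrative only, the real pass emits NIR, not C, and the
PIPE_FUNC_* values are gallium's):

    #include "pipe/p_defines.h"

    static bool
    alpha_test_passes(float alpha, float ref, unsigned pipe_func)
    {
            switch (pipe_func) {
            case PIPE_FUNC_NEVER:  return false;
            case PIPE_FUNC_LESS:   return alpha < ref;
            case PIPE_FUNC_GEQUAL: return alpha >= ref;
            /* ... the remaining PIPE_FUNC_* cases ... */
            default:               return true;
            }
    }
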
@@ -2311,10 +2313,11 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
 
         if (c->key->ucp_enables) {
                 if (stage == QSTAGE_FRAG) {
-                        NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
+                        NIR_PASS_V(c->s, nir_lower_clip_fs,
+                                   c->key->ucp_enables, false);
                 } else {
                         NIR_PASS_V(c->s, nir_lower_clip_vs,
-                                  c->key->ucp_enables, false);
+                                   c->key->ucp_enables, false, false, NULL);
                         NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                                    nir_var_shader_out);
                 }
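
Both clip passes gained parameters in the same API change: per my
reading, nir_lower_clip_fs() takes a bool selecting whether clip
distances are read through a combined clip-distance array, and
nir_lower_clip_vs() additionally accepts an optional state-token array
for sourcing the plane equations; vc4 leaves all of these at their
defaults. The transform itself is the classic user-clip-plane
lowering; a sketch of the math the generated code performs:

    /* For each enabled user clip plane i, the vertex stage emits
     *     dist[i] = dot(clip_position, plane[i])
     * and the fragment stage discards fragments whose interpolated
     * dist[i] is negative. */
    static float
    clip_distance(const float pos[4], const float plane[4])
    {
            return pos[0] * plane[0] + pos[1] * plane[1] +
                   pos[2] * plane[2] + pos[3] * plane[3];
    }
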
@@ -2331,10 +2334,25 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
 
         NIR_PASS_V(c->s, vc4_nir_lower_io, c);
         NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
-        NIR_PASS_V(c->s, nir_lower_idiv);
+        NIR_PASS_V(c->s, nir_lower_idiv, nir_lower_idiv_fast);
 
         vc4_optimize_nir(c->s);
 
+        /* Do late algebraic optimization to turn add(a, neg(b)) back into
+         * subs, then the mandatory cleanup after algebraic.  Note that it may
+         * produce fnegs, and if so then we need to keep running to squash
+         * fneg(fneg(a)).
+         */
+        bool more_late_algebraic = true;
+        while (more_late_algebraic) {
+                more_late_algebraic = false;
+                NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
+                NIR_PASS_V(c->s, nir_opt_constant_folding);
+                NIR_PASS_V(c->s, nir_copy_prop);
+                NIR_PASS_V(c->s, nir_opt_dce);
+                NIR_PASS_V(c->s, nir_opt_cse);
+        }
+
         NIR_PASS_V(c->s, nir_lower_bool_to_int32);
 
         NIR_PASS_V(c->s, nir_convert_from_ssa, true);
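
Two related updates land in this last hunk. nir_lower_idiv() now
selects a lowering path, and nir_lower_idiv_fast picks the cheaper
instruction sequence (a precise path existed alongside it for callers
that need exact integer division). The new loop then reruns
nir_opt_algebraic_late to a fixed point: NIR_PASS, unlike NIR_PASS_V,
ORs the pass's progress into its first argument, so the loop spins
while the late-algebraic rules keep changing the shader, e.g.
re-squashing an fneg(fneg(a)) produced by turning add(a, neg(b)) into
a subtract, with constant folding, copy propagation, DCE, and CSE as
per-round cleanup. A minimal sketch of the same fixed-point idiom with
the macros expanded (ignoring the validation NIR_PASS also performs;
assumes the usual nir.h declarations):

    static void
    late_algebraic(nir_shader *s)
    {
            bool progress = true;
            while (progress) {
                    progress = false;
                    progress |= nir_opt_algebraic_late(s);
                    nir_opt_constant_folding(s);
                    nir_copy_prop(s);
                    nir_opt_dce(s);
                    nir_opt_cse(s);
            }
    }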