freedreno: deduplicate a3xx+ disasm
[mesa.git] / src / freedreno / ir3 / ir3_shader.c
index a2ca295845e950016fbc5e0c01ab25d0c11fbde9..99cacbf3301308dc8f0b536a99d62244520e46f4 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "util/u_atomic.h"
 #include "util/u_string.h"
+#include "util/u_math.h"
 #include "util/u_memory.h"
 #include "util/format/u_format.h"
 
 #include "ir3_compiler.h"
 #include "ir3_nir.h"
 
+#include "disasm.h"
+
 int
 ir3_glsl_type_size(const struct glsl_type *type, bool bindless)
 {
        return glsl_count_attribute_slots(type, false);
 }
 
-static void
-delete_variant(struct ir3_shader_variant *v)
-{
-       if (v->ir)
-               ir3_destroy(v->ir);
-       assert(!v->bo);
-       if (v->binning)
-               delete_variant(v->binning);
-       free(v->bin);
-       free(v);
-}
-
 /* for vertex shader, the inputs are loaded into registers before the shader
  * is executed, so max_regs from the shader instructions might not properly
  * reflect the # of registers actually used, especially in case passthrough
@@ -64,7 +55,7 @@ delete_variant(struct ir3_shader_variant *v)
  * the reg off.
  */
 static void
-fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
+fixup_regfootprint(struct ir3_shader_variant *v)
 {
        unsigned i;
 
@@ -86,7 +77,7 @@ fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
                        unsigned n = util_last_bit(v->inputs[i].compmask) - 1;
                        int32_t regid = v->inputs[i].regid + n;
                        if (v->inputs[i].half) {
-                               if (gpu_id < 500) {
+                               if (!v->mergedregs) {
                                        v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
                                } else {
                                        v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
@@ -103,7 +94,7 @@ fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
                        continue;
                int32_t regid = v->outputs[i].regid + 3;
                if (v->outputs[i].half) {
-                       if (gpu_id < 500) {
+                       if (!v->mergedregs) {
                                v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
                        } else {
                                v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
@@ -117,7 +108,7 @@ fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
                unsigned n = util_last_bit(v->sampler_prefetch[i].wrmask) - 1;
                int32_t regid = v->sampler_prefetch[i].dst + n;
                if (v->sampler_prefetch[i].half_precision) {
-                       if (gpu_id < 500) {
+                       if (!v->mergedregs) {
                                v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
                        } else {
                                v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
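Note that the gpu_id < 500 checks above are replaced by the per-variant mergedregs flag (assigned from the compiler's gpu_id later in this patch): what actually matters is whether the register file is merged, i.e. whether half registers alias full registers. A hypothetical helper, not part of this patch, sketching the index math under the usual ir3 encoding where regid packs the register number above a 2-bit component:

    /* sketch only: mirrors the footprint logic above */
    static void
    count_reg(struct ir3_info *info, int32_t regid, bool half, bool mergedregs)
    {
            if (half && !mergedregs) {
                    /* separate half-reg file: regid >> 2 is a half-reg index */
                    info->max_half_reg = MAX2(info->max_half_reg, regid >> 2);
            } else if (half) {
                    /* merged file: two half regs alias one full reg, so the
                     * footprint lands in max_reg at regid >> 3
                     */
                    info->max_reg = MAX2(info->max_reg, regid >> 3);
            } else {
                    info->max_reg = MAX2(info->max_reg, regid >> 2);
            }
    }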
@@ -152,7 +143,14 @@ void * ir3_shader_assemble(struct ir3_shader_variant *v)
         */
        v->constlen = MAX2(v->constlen, v->info.max_const + 1);
 
-       fixup_regfootprint(v, gpu_id);
+       /* On a4xx and newer, constlen must be a multiple of 16 dwords even
+        * though uploads are in units of 4 dwords. constlen is counted in
+        * vec4 units, so aligning it to 4 gives the required 16-dword
+        * multiple and makes calculations on the shared constlen simpler.
+        */
+       if (gpu_id >= 400)
+               v->constlen = align(v->constlen, 4);
+
+       fixup_regfootprint(v);
 
        return bin;
 }
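Since constlen is counted in vec4 units, align(v->constlen, 4) (using the util/u_math.h helper added to the includes above) produces exactly the 16-dword multiple the comment describes. A minimal worked example:

    unsigned constlen = 9;            /* 9 vec4s = 36 dwords */
    constlen = align(constlen, 4);    /* 12 vec4s = 48 dwords, a 16-dword multiple */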
@@ -175,17 +173,42 @@ assemble_variant(struct ir3_shader_variant *v)
        v->ir = NULL;
 }
 
+static bool
+compile_variant(struct ir3_shader_variant *v)
+{
+       int ret = ir3_compile_shader_nir(v->shader->compiler, v);
+       if (ret) {
+               _debug_printf("compile failed! (%s:%s)", v->shader->nir->info.name,
+                               v->shader->nir->info.label);
+               return false;
+       }
+
+       assemble_variant(v);
+       if (!v->bin) {
+               _debug_printf("assemble failed! (%s:%s)", v->shader->nir->info.name,
+                               v->shader->nir->info.label);
+               return false;
+       }
+
+       return true;
+}
+
 /*
  * For creating normal shader variants, 'nonbinning' is NULL.  For
  * creating a binning-pass shader, it is a link to the corresponding
  * normal (non-binning) variant.
  */
 static struct ir3_shader_variant *
-create_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
+alloc_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
                struct ir3_shader_variant *nonbinning)
 {
-       struct ir3_shader_variant *v = CALLOC_STRUCT(ir3_shader_variant);
-       int ret;
+       void *mem_ctx = shader;
+       /* hang the binning variant off its non-binning counterpart instead
+        * of the shader, to simplify the error cleanup paths
+        */
+       if (nonbinning)
+               mem_ctx = nonbinning;
+       struct ir3_shader_variant *v = rzalloc_size(mem_ctx, sizeof(*v));
 
        if (!v)
                return NULL;
@@ -196,65 +219,99 @@ create_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
        v->nonbinning = nonbinning;
        v->key = *key;
        v->type = shader->type;
+       v->mergedregs = shader->compiler->gpu_id >= 600;
 
-       ret = ir3_compile_shader_nir(shader->compiler, v);
-       if (ret) {
-               debug_error("compile failed!");
+       if (!v->binning_pass)
+               v->const_state = rzalloc_size(v, sizeof(*v->const_state));
+
+       return v;
+}
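The move from CALLOC_STRUCT/free to ralloc makes ownership hierarchical: the shader owns its variants, a non-binning variant owns its binning counterpart and its const_state, and freeing a parent context releases the whole subtree. That is what lets the fail path in create_variant below collapse to a single ralloc_free(v), and lets ir3_shader_destroy further down drop its manual variant walk. A minimal standalone sketch of the pattern, using util/ralloc.h (struct foo stands in for the real types):

    struct foo *shader_ctx = rzalloc_size(NULL, sizeof(*shader_ctx)); /* root */
    struct foo *variant    = rzalloc_size(shader_ctx, sizeof(*variant));
    struct foo *binning    = rzalloc_size(variant, sizeof(*binning));
    ralloc_free(shader_ctx);  /* frees shader_ctx, variant and binning */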
+
+static bool
+needs_binning_variant(struct ir3_shader_variant *v)
+{
+       if ((v->type == MESA_SHADER_VERTEX) && ir3_has_binning_vs(&v->key))
+               return true;
+       return false;
+}
+
+static struct ir3_shader_variant *
+create_variant(struct ir3_shader *shader, const struct ir3_shader_key *key)
+{
+       struct ir3_shader_variant *v = alloc_variant(shader, key, NULL);
+
+       if (!v)
                goto fail;
+
+       if (needs_binning_variant(v)) {
+               v->binning = alloc_variant(shader, key, v);
+               if (!v->binning)
+                       goto fail;
        }
 
-       assemble_variant(v);
-       if (!v->bin) {
-               debug_error("assemble failed!");
-               goto fail;
+       if (ir3_disk_cache_retrieve(shader->compiler, v))
+               return v;
+
+       if (!shader->nir_finalized) {
+               ir3_nir_post_finalize(shader->compiler, shader->nir);
+
+               if (ir3_shader_debug & IR3_DBG_DISASM) {
+                       printf("dump nir%d: type=%d", shader->id, shader->type);
+                       nir_print_shader(shader->nir, stdout);
+               }
+
+               shader->nir_finalized = true;
        }
 
+       if (!compile_variant(v))
+               goto fail;
+
+       if (needs_binning_variant(v) && !compile_variant(v->binning))
+               goto fail;
+
+       ir3_disk_cache_store(shader->compiler, v);
+
        return v;
 
 fail:
-       delete_variant(v);
+       ralloc_free(v);
        return NULL;
 }
 
 static inline struct ir3_shader_variant *
-shader_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
-               bool *created)
+shader_variant(struct ir3_shader *shader, const struct ir3_shader_key *key)
 {
        struct ir3_shader_variant *v;
 
-       *created = false;
-
        for (v = shader->variants; v; v = v->next)
                if (ir3_shader_key_equal(key, &v->key))
                        return v;
 
-       /* compile new variant if it doesn't exist already: */
-       v = create_variant(shader, key, NULL);
-       if (v) {
-               v->next = shader->variants;
-               shader->variants = v;
-               *created = true;
-       }
-
-       return v;
+       return NULL;
 }
 
 struct ir3_shader_variant *
-ir3_shader_get_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
+ir3_shader_get_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
                bool binning_pass, bool *created)
 {
        mtx_lock(&shader->variants_lock);
-       struct ir3_shader_variant *v =
-                       shader_variant(shader, key, created);
-
-       if (v && binning_pass) {
-               if (!v->binning) {
-                       v->binning = create_variant(shader, key, v);
+       struct ir3_shader_variant *v = shader_variant(shader, key);
+
+       if (!v) {
+               /* compile new variant if it doesn't exist already: */
+               v = create_variant(shader, key);
+               if (v) {
+                       v->next = shader->variants;
+                       shader->variants = v;
                        *created = true;
                }
-               mtx_unlock(&shader->variants_lock);
-               return v->binning;
        }
+
+       if (v && binning_pass) {
+               v = v->binning;
+               assert(v);
+       }
+
        mtx_unlock(&shader->variants_lock);
 
        return v;
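A hypothetical caller, illustrating the new contract: since create_variant now compiles the binning counterpart together with the main variant, a binning_pass lookup never compiles on demand and the function can simply assert that v->binning exists (shader, key and needs_binning are assumed to exist in the caller):

    bool created = false;
    struct ir3_shader_variant *v =
            ir3_shader_get_variant(shader, &key, false, &created);
    struct ir3_shader_variant *bv = NULL;
    if (v && needs_binning)  /* e.g. a vertex shader, per ir3_has_binning_vs() */
            bv = ir3_shader_get_variant(shader, &key, true, &created);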
@@ -263,16 +320,9 @@ ir3_shader_get_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
 void
 ir3_shader_destroy(struct ir3_shader *shader)
 {
-       struct ir3_shader_variant *v, *t;
-       for (v = shader->variants; v; ) {
-               t = v;
-               v = v->next;
-               delete_variant(t);
-       }
-       free(shader->const_state.immediates);
        ralloc_free(shader->nir);
        mtx_destroy(&shader->variants_lock);
-       free(shader);
+       ralloc_free(shader);
 }
 
 /**
@@ -292,6 +342,10 @@ ir3_setup_used_key(struct ir3_shader *shader)
         */
        key->has_per_samp = true;
 
+       key->safe_constlen = true;
+
+       key->ucp_enables = 0xff;
+
        if (info->stage == MESA_SHADER_FRAGMENT) {
                key->fsaturate_s = ~0;
                key->fsaturate_t = ~0;
@@ -304,6 +358,10 @@ ir3_setup_used_key(struct ir3_shader *shader)
                        key->color_two_side = true;
                }
 
+               if (info->inputs_read & VARYING_BIT_LAYER) {
+                       key->layer_zero = true;
+               }
+
                if ((info->outputs_written & ~(FRAG_RESULT_DEPTH |
                                                                FRAG_RESULT_STENCIL |
                                                                FRAG_RESULT_SAMPLE_MASK)) != 0) {
@@ -331,11 +389,80 @@ ir3_setup_used_key(struct ir3_shader *shader)
        }
 }
 
+
+/* Given an array of constlen values, decrease some of them so that the sum
+ * stays within "combined_limit" while trying to share the reduction fairly.
+ * Returns a bitfield of which stages were trimmed.
+ */
+static uint32_t
+trim_constlens(unsigned *constlens,
+                          unsigned first_stage, unsigned last_stage,
+                          unsigned combined_limit, unsigned safe_limit)
+{
+   unsigned cur_total = 0;
+   for (unsigned i = first_stage; i <= last_stage; i++) {
+      cur_total += constlens[i];
+   }
+
+   uint32_t trimmed = 0;
+
+   while (cur_total > combined_limit) {
+      /* each pass clamps the currently-largest stage to the safe limit */
+      unsigned max_stage = 0;
+      unsigned max_const = 0;
+      for (unsigned i = first_stage; i <= last_stage; i++) {
+         if (constlens[i] >= max_const) {
+            max_stage = i;
+            max_const = constlens[i];
+         }
+      }
+
+      assert(max_const > safe_limit);
+      trimmed |= 1 << max_stage;
+      cur_total = cur_total - max_const + safe_limit;
+      constlens[max_stage] = safe_limit;
+   }
+
+   return trimmed;
+}
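A worked example with made-up numbers, assuming the VS and FS are the only stages with nonzero constlen:

    unsigned constlens[MESA_SHADER_STAGES] = {
            [MESA_SHADER_VERTEX] = 512, [MESA_SHADER_FRAGMENT] = 384,
    };
    /* cur_total = 896 > 640, so the largest stage (VS) is clamped to the
     * safe limit, leaving 256 + 384 = 640, which fits:
     * trimmed == 1 << MESA_SHADER_VERTEX
     */
    uint32_t trimmed = trim_constlens(constlens, MESA_SHADER_VERTEX,
                                      MESA_SHADER_FRAGMENT, 640, 256);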
+
+/* Figures out which stages in the pipeline to use the "safe" constlen for, in
+ * order to satisfy all shared constlen limits.
+ */
+uint32_t
+ir3_trim_constlen(struct ir3_shader_variant **variants,
+                                 const struct ir3_compiler *compiler)
+{
+       unsigned constlens[MESA_SHADER_STAGES] = {};
+
+       for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+               if (variants[i])
+                       constlens[i] = variants[i]->constlen;
+       }
+
+       uint32_t trimmed = 0;
+       STATIC_ASSERT(MESA_SHADER_STAGES <= 8 * sizeof(trimmed));
+
+       /* There are two shared limits to take into account: the geometry
+        * shared limit on a6xx and the total pipeline limit. The frag limit
+        * on a6xx only applies to a single stage, so it is always satisfied
+        * by the first variant.
+        */
+       if (compiler->gpu_id >= 600) {
+               trimmed |=
+                       trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_GEOMETRY,
+                                                  compiler->max_const_geom, compiler->max_const_safe);
+       }
+       trimmed |=
+               trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_FRAGMENT,
+                                          compiler->max_const_pipeline, compiler->max_const_safe);
+
+       return trimmed;
+}
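A sketch of how a driver is expected to consume the result, recompiling each flagged stage with safe_constlen set in its key (the keys/shaders arrays and the created flag are illustrative, not from this patch):

    uint32_t trimmed = ir3_trim_constlen(variants, compiler);
    for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
            if (trimmed & (1u << stage)) {
                    struct ir3_shader_key key = keys[stage];
                    key.safe_constlen = true;
                    variants[stage] = ir3_shader_get_variant(shaders[stage],
                                    &key, false, &created);
            }
    }

This pairs with the key->safe_constlen = true added to ir3_setup_used_key above, which keeps safe_constlen in the used key so these recompiles yield distinct cached variants.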
+
 struct ir3_shader *
 ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
                unsigned reserved_user_consts, struct ir3_stream_output_info *stream_output)
 {
-       struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader);
+       struct ir3_shader *shader = rzalloc_size(NULL, sizeof(*shader));
 
        mtx_init(&shader->variants_lock, mtx_plain);
        shader->compiler = compiler;
@@ -343,41 +470,10 @@ ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
        shader->type = nir->info.stage;
        if (stream_output)
                memcpy(&shader->stream_output, stream_output, sizeof(shader->stream_output));
-       shader->const_state.num_reserved_user_consts = reserved_user_consts;
-
-       if (nir->info.stage == MESA_SHADER_GEOMETRY)
-               NIR_PASS_V(nir, ir3_nir_lower_gs);
-
-       NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
-                          (nir_lower_io_options)0);
-
-       if (compiler->gpu_id >= 600 &&
-                       nir->info.stage == MESA_SHADER_FRAGMENT &&
-                       !(ir3_shader_debug & IR3_DBG_NOFP16))
-               NIR_PASS_V(nir, nir_lower_mediump_outputs);
-
-       if (nir->info.stage == MESA_SHADER_FRAGMENT) {
-               /* NOTE: lower load_barycentric_at_sample first, since it
-                * produces load_barycentric_at_offset:
-                */
-               NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
-               NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);
-
-               NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
-       }
-
-       NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
-
-       NIR_PASS_V(nir, nir_lower_amul, ir3_glsl_type_size);
-
-       /* do first pass optimization, ignoring the key: */
-       ir3_optimize_nir(shader, nir, NULL);
-
+       shader->num_reserved_user_consts = reserved_user_consts;
        shader->nir = nir;
-       if (ir3_shader_debug & IR3_DBG_DISASM) {
-               printf("dump nir%d: type=%d", shader->id, shader->type);
-               nir_print_shader(shader->nir, stdout);
-       }
+
+       ir3_disk_cache_init_shader_key(compiler, shader);
 
        ir3_setup_used_key(shader);
 
@@ -474,7 +570,7 @@ ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
                fprintf(out, "\n");
        }
 
-       struct ir3_const_state *const_state = &so->shader->const_state;
+       const struct ir3_const_state *const_state = ir3_const_state(so);
        for (i = 0; i < const_state->immediates_count; i++) {
                fprintf(out, "@const(c%d.x)\t", const_state->offsets.immediate + i);
                fprintf(out, "0x%08x, 0x%08x, 0x%08x, 0x%08x\n",