gallium/ureg: Set the next shader stage from the shader info.
[mesa.git] / src / freedreno / ir3 / ir3_shader.c
index 78ed751a3f25d0c0724b0e39b2b6dddc9b0b695b..519fafec900e22ffd3d2317cb9ba87ea6455bf20 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "util/u_atomic.h"
 #include "util/u_string.h"
+#include "util/u_math.h"
 #include "util/u_memory.h"
 #include "util/format/u_format.h"
 
 #include "ir3_compiler.h"
 #include "ir3_nir.h"
 
+#include "disasm.h"
+
 int
 ir3_glsl_type_size(const struct glsl_type *type, bool bindless)
 {
        return glsl_count_attribute_slots(type, false);
 }
 
-static void
-delete_variant(struct ir3_shader_variant *v)
-{
-       if (v->ir)
-               ir3_destroy(v->ir);
-       assert(!v->bo);
-       if (v->binning)
-               delete_variant(v->binning);
-       free(v->bin);
-       free(v);
-}
-
 /* for vertex shader, the inputs are loaded into registers before the shader
  * is executed, so max_regs from the shader instructions might not properly
  * reflect the # of registers actually used, especially in case passthrough
@@ -133,25 +124,26 @@ fixup_regfootprint(struct ir3_shader_variant *v)
  */
 void * ir3_shader_assemble(struct ir3_shader_variant *v)
 {
-       unsigned gpu_id = v->shader->compiler->gpu_id;
+       const struct ir3_compiler *compiler = v->shader->compiler;
        void *bin;
 
        bin = ir3_assemble(v);
        if (!bin)
                return NULL;
 
-       if (gpu_id >= 400) {
-               v->instrlen = v->info.sizedwords / (2 * 16);
-       } else {
-               v->instrlen = v->info.sizedwords / (2 * 4);
-       }
-
        /* NOTE: if relative addressing is used, we set constlen in
         * the compiler (to worst-case value) since we don't know in
         * the assembler what the max addr reg value can be:
         */
        v->constlen = MAX2(v->constlen, v->info.max_const + 1);
 
+       /* On a4xx and newer, constlen must be a multiple of 16 dwords even though
+        * uploads are in units of 4 dwords. Round it up here to make calculations
+        * regarding the shared constlen simpler.
+        */
+       if (compiler->gpu_id >= 400)
+               v->constlen = align(v->constlen, 4);
+
        fixup_regfootprint(v);
 
        return bin;
@@ -175,17 +167,42 @@ assemble_variant(struct ir3_shader_variant *v)
        v->ir = NULL;
 }
 
+static bool
+compile_variant(struct ir3_shader_variant *v)
+{
+       int ret = ir3_compile_shader_nir(v->shader->compiler, v);
+       if (ret) {
+               _debug_printf("compile failed! (%s:%s)", v->shader->nir->info.name,
+                               v->shader->nir->info.label);
+               return false;
+       }
+
+       assemble_variant(v);
+       if (!v->bin) {
+               _debug_printf("assemble failed! (%s:%s)", v->shader->nir->info.name,
+                               v->shader->nir->info.label);
+               return false;
+       }
+
+       return true;
+}
+
 /*
  * For creating normal shader variants, 'nonbinning' is NULL.  For
  * creating binning pass shader, it is link to corresponding normal
  * (non-binning) variant.
  */
 static struct ir3_shader_variant *
-create_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
+alloc_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
                struct ir3_shader_variant *nonbinning)
 {
-       struct ir3_shader_variant *v = CALLOC_STRUCT(ir3_shader_variant);
-       int ret;
+       void *mem_ctx = shader;
+       /* hang the binning variant off its non-binning counterpart instead
+        * of the shader, to simplify the error cleanup paths
+        */
+       if (nonbinning)
+               mem_ctx = nonbinning;
+       struct ir3_shader_variant *v = rzalloc_size(mem_ctx, sizeof(*v));
 
        if (!v)
                return NULL;
@@ -196,67 +213,75 @@ create_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
        v->nonbinning = nonbinning;
        v->key = *key;
        v->type = shader->type;
+       v->mergedregs = shader->compiler->gpu_id >= 600;
 
-       if (shader->compiler->gpu_id >= 600) {
-               switch (v->type) {
-               case MESA_SHADER_TESS_CTRL:
-               case MESA_SHADER_TESS_EVAL:
-                       v->mergedregs = false;
-                       break;
-               case MESA_SHADER_VERTEX:
-               case MESA_SHADER_GEOMETRY:
-                       /* For VS/GS, normally do mergedregs, but if there is tess
-                        * we need to not used MERGEDREGS
-                        */
-                       v->mergedregs = !key->tessellation;
-                       break;
-               default:
-                       v->mergedregs = true;
+       if (!v->binning_pass)
+               v->const_state = rzalloc_size(v, sizeof(*v->const_state));
+
+       return v;
+}
+
+static bool
+needs_binning_variant(struct ir3_shader_variant *v)
+{
+       if ((v->type == MESA_SHADER_VERTEX) && ir3_has_binning_vs(&v->key))
+               return true;
+       return false;
+}
+
+static struct ir3_shader_variant *
+create_variant(struct ir3_shader *shader, const struct ir3_shader_key *key)
+{
+       struct ir3_shader_variant *v = alloc_variant(shader, key, NULL);
+
+       if (!v)
+               goto fail;
+
+       if (needs_binning_variant(v)) {
+               v->binning = alloc_variant(shader, key, v);
+               if (!v->binning)
+                       goto fail;
+       }
+
+       if (ir3_disk_cache_retrieve(shader->compiler, v))
+               return v;
+
+       if (!shader->nir_finalized) {
+               ir3_nir_post_finalize(shader->compiler, shader->nir);
+
+               if (ir3_shader_debug & IR3_DBG_DISASM) {
+                       printf("dump nir%d: type=%d", shader->id, shader->type);
+                       nir_print_shader(shader->nir, stdout);
                }
-       } else {
-               v->mergedregs = false;
+
+               shader->nir_finalized = true;
        }
 
-       ret = ir3_compile_shader_nir(shader->compiler, v);
-       if (ret) {
-               debug_error("compile failed!");
+       if (!compile_variant(v))
                goto fail;
-       }
 
-       assemble_variant(v);
-       if (!v->bin) {
-               debug_error("assemble failed!");
+       if (needs_binning_variant(v) && !compile_variant(v->binning))
                goto fail;
-       }
+
+       ir3_disk_cache_store(shader->compiler, v);
 
        return v;
 
 fail:
-       delete_variant(v);
+       ralloc_free(v);
        return NULL;
 }
 
 static inline struct ir3_shader_variant *
-shader_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
-               bool *created)
+shader_variant(struct ir3_shader *shader, const struct ir3_shader_key *key)
 {
        struct ir3_shader_variant *v;
 
-       *created = false;
-
        for (v = shader->variants; v; v = v->next)
                if (ir3_shader_key_equal(key, &v->key))
                        return v;
 
-       /* compile new variant if it doesn't exist already: */
-       v = create_variant(shader, key, NULL);
-       if (v) {
-               v->next = shader->variants;
-               shader->variants = v;
-               *created = true;
-       }
-
-       return v;
+       return NULL;
 }
 
 struct ir3_shader_variant *
@@ -264,17 +289,23 @@ ir3_shader_get_variant(struct ir3_shader *shader, const struct ir3_shader_key *k
                bool binning_pass, bool *created)
 {
        mtx_lock(&shader->variants_lock);
-       struct ir3_shader_variant *v =
-                       shader_variant(shader, key, created);
-
-       if (v && binning_pass) {
-               if (!v->binning) {
-                       v->binning = create_variant(shader, key, v);
+       struct ir3_shader_variant *v = shader_variant(shader, key);
+
+       if (!v) {
+               /* compile new variant if it doesn't exist already: */
+               v = create_variant(shader, key);
+               if (v) {
+                       v->next = shader->variants;
+                       shader->variants = v;
                        *created = true;
                }
-               mtx_unlock(&shader->variants_lock);
-               return v->binning;
        }
+
+       if (v && binning_pass) {
+               v = v->binning;
+               assert(v);
+       }
+
        mtx_unlock(&shader->variants_lock);
 
        return v;
@@ -283,16 +314,9 @@ ir3_shader_get_variant(struct ir3_shader *shader, const struct ir3_shader_key *k
 void
 ir3_shader_destroy(struct ir3_shader *shader)
 {
-       struct ir3_shader_variant *v, *t;
-       for (v = shader->variants; v; ) {
-               t = v;
-               v = v->next;
-               delete_variant(t);
-       }
-       free(shader->const_state.immediates);
        ralloc_free(shader->nir);
        mtx_destroy(&shader->variants_lock);
-       free(shader);
+       ralloc_free(shader);
 }
 
 /**
@@ -312,6 +336,10 @@ ir3_setup_used_key(struct ir3_shader *shader)
         */
        key->has_per_samp = true;
 
+       key->safe_constlen = true;
+
+       key->ucp_enables = 0xff;
+
        if (info->stage == MESA_SHADER_FRAGMENT) {
                key->fsaturate_s = ~0;
                key->fsaturate_t = ~0;
@@ -324,6 +352,10 @@ ir3_setup_used_key(struct ir3_shader *shader)
                        key->color_two_side = true;
                }
 
+               if (info->inputs_read & VARYING_BIT_LAYER) {
+                       key->layer_zero = true;
+               }
+
                if ((info->outputs_written & ~(FRAG_RESULT_DEPTH |
                                                                FRAG_RESULT_STENCIL |
                                                                FRAG_RESULT_SAMPLE_MASK)) != 0) {
@@ -351,11 +383,80 @@ ir3_setup_used_key(struct ir3_shader *shader)
        }
 }
 
+
+/* Given an array of constlen's, decrease some of them so that the sum stays
+ * within "combined_limit" while trying to fairly share the reduction. Returns
+ * a bitfield of which stages should be trimmed.
+ */
+static uint32_t
+trim_constlens(unsigned *constlens,
+                          unsigned first_stage, unsigned last_stage,
+                          unsigned combined_limit, unsigned safe_limit)
+{
+   unsigned cur_total = 0;
+   for (unsigned i = first_stage; i <= last_stage; i++) {
+      cur_total += constlens[i];
+   }
+
+   unsigned max_stage = 0;
+   unsigned max_const = 0;
+   uint32_t trimmed = 0;
+
+   while (cur_total > combined_limit) {
+          for (unsigned i = first_stage; i <= last_stage; i++) {
+                  if (constlens[i] >= max_const) {
+                          max_stage = i;
+                          max_const = constlens[i];
+                  }
+          }
+
+          assert(max_const > safe_limit);
+          trimmed |= 1 << max_stage;
+          cur_total = cur_total - max_const + safe_limit;
+          constlens[max_stage] = safe_limit;
+   }
+
+   return trimmed;
+}
+
+/* Figures out which stages in the pipeline to use the "safe" constlen for, in
+ * order to satisfy all shared constlen limits.
+ */
+uint32_t
+ir3_trim_constlen(struct ir3_shader_variant **variants,
+                                 const struct ir3_compiler *compiler)
+{
+       unsigned constlens[MESA_SHADER_STAGES] = {};
+
+       for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+               if (variants[i])
+                       constlens[i] = variants[i]->constlen;
+       }
+
+       uint32_t trimmed = 0;
+       STATIC_ASSERT(MESA_SHADER_STAGES <= 8 * sizeof(trimmed));
+
+       /* There are two shared limits to take into account, the geometry limit on
+        * a6xx and the total limit. The frag limit on a6xx only matters for a
+        * single stage, so it's always satisfied with the first variant.
+        */
+       if (compiler->gpu_id >= 600) {
+               trimmed |=
+                       trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_GEOMETRY,
+                                                  compiler->max_const_geom, compiler->max_const_safe);
+       }
+       trimmed |=
+               trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_FRAGMENT,
+                                          compiler->max_const_pipeline, compiler->max_const_safe);
+
+       return trimmed;
+}
+
 struct ir3_shader *
 ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
                unsigned reserved_user_consts, struct ir3_stream_output_info *stream_output)
 {
-       struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader);
+       struct ir3_shader *shader = rzalloc_size(NULL, sizeof(*shader));
 
        mtx_init(&shader->variants_lock, mtx_plain);
        shader->compiler = compiler;
@@ -363,41 +464,10 @@ ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
        shader->type = nir->info.stage;
        if (stream_output)
                memcpy(&shader->stream_output, stream_output, sizeof(shader->stream_output));
-       shader->const_state.num_reserved_user_consts = reserved_user_consts;
-
-       if (nir->info.stage == MESA_SHADER_GEOMETRY)
-               NIR_PASS_V(nir, ir3_nir_lower_gs);
-
-       NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
-                          (nir_lower_io_options)0);
-
-       if (compiler->gpu_id >= 600 &&
-                       nir->info.stage == MESA_SHADER_FRAGMENT &&
-                       !(ir3_shader_debug & IR3_DBG_NOFP16))
-               NIR_PASS_V(nir, nir_lower_mediump_outputs);
-
-       if (nir->info.stage == MESA_SHADER_FRAGMENT) {
-               /* NOTE: lower load_barycentric_at_sample first, since it
-                * produces load_barycentric_at_offset:
-                */
-               NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
-               NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);
-
-               NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
-       }
-
-       NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
-
-       NIR_PASS_V(nir, nir_lower_amul, ir3_glsl_type_size);
-
-       /* do first pass optimization, ignoring the key: */
-       ir3_optimize_nir(shader, nir);
-
+       shader->num_reserved_user_consts = reserved_user_consts;
        shader->nir = nir;
-       if (ir3_shader_debug & IR3_DBG_DISASM) {
-               printf("dump nir%d: type=%d", shader->id, shader->type);
-               nir_print_shader(shader->nir, stdout);
-       }
+
+       ir3_disk_cache_init_shader_key(compiler, shader);
 
        ir3_setup_used_key(shader);
 
@@ -495,13 +565,13 @@ ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
        }
 
        const struct ir3_const_state *const_state = ir3_const_state(so);
-       for (i = 0; i < const_state->immediates_count; i++) {
+       for (i = 0; i < DIV_ROUND_UP(const_state->immediates_count, 4); i++) {
                fprintf(out, "@const(c%d.x)\t", const_state->offsets.immediate + i);
                fprintf(out, "0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
-                               const_state->immediates[i].val[0],
-                               const_state->immediates[i].val[1],
-                               const_state->immediates[i].val[2],
-                               const_state->immediates[i].val[3]);
+                               const_state->immediates[i * 4 + 0],
+                               const_state->immediates[i * 4 + 1],
+                               const_state->immediates[i * 4 + 2],
+                               const_state->immediates[i * 4 + 3]);
        }
 
        disasm_a3xx(bin, so->info.sizedwords, 0, out, ir->compiler->gpu_id);
@@ -545,6 +615,17 @@ ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
                        so->info.max_reg + 1,
                        so->constlen);
 
+       fprintf(out, "; %s prog %d/%d: %u cat0, %u cat1, %u cat2, %u cat3, %u cat4, %u cat5, %u cat6, %u cat7, \n",
+                       type, so->shader->id, so->id,
+                       so->info.instrs_per_cat[0],
+                       so->info.instrs_per_cat[1],
+                       so->info.instrs_per_cat[2],
+                       so->info.instrs_per_cat[3],
+                       so->info.instrs_per_cat[4],
+                       so->info.instrs_per_cat[5],
+                       so->info.instrs_per_cat[6],
+                       so->info.instrs_per_cat[7]);
+
        fprintf(out, "; %s prog %d/%d: %u sstall, %u (ss), %u (sy), %d max_sun, %d loops\n",
                        type, so->shader->id, so->id,
                        so->info.sstall,