freedreno: deduplicate a3xx+ disasm
[mesa.git] / src / freedreno / ir3 / ir3_shader.c
index 5ea6ba9390212bb6084d2f16beba0fd6069ede65..99cacbf3301308dc8f0b536a99d62244520e46f4 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "util/u_atomic.h"
 #include "util/u_string.h"
+#include "util/u_math.h"
 #include "util/u_memory.h"
 #include "util/format/u_format.h"
 
 #include "ir3_compiler.h"
 #include "ir3_nir.h"
 
+#include "disasm.h"
+
 int
 ir3_glsl_type_size(const struct glsl_type *type, bool bindless)
 {
        return glsl_count_attribute_slots(type, false);
 }
 
-static void
-delete_variant(struct ir3_shader_variant *v)
-{
-       if (v->ir)
-               ir3_destroy(v->ir);
-       if (v->bo)
-               fd_bo_del(v->bo);
-       free(v);
-}
-
 /* for vertex shader, the inputs are loaded into registers before the shader
  * is executed, so max_regs from the shader instructions might not properly
  * reflect the # of registers actually used, especially in case passthrough
@@ -62,7 +55,7 @@ delete_variant(struct ir3_shader_variant *v)
  * the reg off.
  */
 static void
-fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
+fixup_regfootprint(struct ir3_shader_variant *v)
 {
        unsigned i;
 
@@ -84,7 +77,7 @@ fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
                        unsigned n = util_last_bit(v->inputs[i].compmask) - 1;
                        int32_t regid = v->inputs[i].regid + n;
                        if (v->inputs[i].half) {
-                               if (gpu_id < 500) {
+                               if (!v->mergedregs) {
                                        v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
                                } else {
                                        v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
@@ -101,7 +94,7 @@ fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
                        continue;
                int32_t regid = v->outputs[i].regid + 3;
                if (v->outputs[i].half) {
-                       if (gpu_id < 500) {
+                       if (!v->mergedregs) {
                                v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
                        } else {
                                v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
@@ -115,7 +108,7 @@ fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
                unsigned n = util_last_bit(v->sampler_prefetch[i].wrmask) - 1;
                int32_t regid = v->sampler_prefetch[i].dst + n;
                if (v->sampler_prefetch[i].half_precision) {
-                       if (gpu_id < 500) {
+                       if (!v->mergedregs) {
                                v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2);
                        } else {
                                v->info.max_reg = MAX2(v->info.max_reg, regid >> 3);
@@ -129,11 +122,12 @@ fixup_regfootprint(struct ir3_shader_variant *v, uint32_t gpu_id)
 /* wrapper for ir3_assemble() which does some info fixup based on
  * shader state.  Non-static since used by ir3_cmdline too.
  */
-void * ir3_shader_assemble(struct ir3_shader_variant *v, uint32_t gpu_id)
+void * ir3_shader_assemble(struct ir3_shader_variant *v)
 {
+       unsigned gpu_id = v->shader->compiler->gpu_id;
        void *bin;
 
-       bin = ir3_assemble(v->ir, &v->info, gpu_id);
+       bin = ir3_assemble(v);
        if (!bin)
                return NULL;
 
@@ -149,7 +143,14 @@ void * ir3_shader_assemble(struct ir3_shader_variant *v, uint32_t gpu_id)
         */
        v->constlen = MAX2(v->constlen, v->info.max_const + 1);
 
-       fixup_regfootprint(v, gpu_id);
+       /* On a4xx and newer, constlen must be a multiple of 16 dwords even though
+        * uploads are in units of 4 dwords. Round it up here to make calculations
+        * regarding the shared constlen simpler.
+        */
+       if (gpu_id >= 400)
+               v->constlen = align(v->constlen, 4);
+
+       fixup_regfootprint(v);
 
        return bin;
 }
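
A quick sketch of the constlen rounding added above (constlen is counted in
vec4, i.e. 4-dword, units, and align() comes from the newly included
util/u_math.h):

        unsigned constlen = 13;            /* 13 vec4 == 52 dwords used           */
        constlen = align(constlen, 4);     /* -> 16 vec4 == 64 dwords on a4xx+    */
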
@@ -157,47 +158,57 @@ void * ir3_shader_assemble(struct ir3_shader_variant *v, uint32_t gpu_id)
 static void
 assemble_variant(struct ir3_shader_variant *v)
 {
-       struct ir3_compiler *compiler = v->shader->compiler;
-       struct shader_info *info = &v->shader->nir->info;
-       uint32_t gpu_id = compiler->gpu_id;
-       uint32_t sz, *bin;
-
-       bin = ir3_shader_assemble(v, gpu_id);
-       sz = v->info.sizedwords * 4;
-
-       v->bo = fd_bo_new(compiler->dev, sz,
-                       DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
-                       DRM_FREEDRENO_GEM_TYPE_KMEM,
-                       "%s:%s", ir3_shader_stage(v), info->name);
-
-       memcpy(fd_bo_map(v->bo), bin, sz);
+       v->bin = ir3_shader_assemble(v);
 
        if (shader_debug_enabled(v->shader->type)) {
                fprintf(stdout, "Native code for unnamed %s shader %s:\n",
                        ir3_shader_stage(v), v->shader->nir->info.name);
                if (v->shader->type == MESA_SHADER_FRAGMENT)
                        fprintf(stdout, "SIMD0\n");
-               ir3_shader_disasm(v, bin, stdout);
+               ir3_shader_disasm(v, v->bin, stdout);
        }
 
-       free(bin);
-
        /* no need to keep the ir around beyond this point: */
        ir3_destroy(v->ir);
        v->ir = NULL;
 }
 
+static bool
+compile_variant(struct ir3_shader_variant *v)
+{
+       int ret = ir3_compile_shader_nir(v->shader->compiler, v);
+       if (ret) {
+               _debug_printf("compile failed! (%s:%s)", v->shader->nir->info.name,
+                               v->shader->nir->info.label);
+               return false;
+       }
+
+       assemble_variant(v);
+       if (!v->bin) {
+               _debug_printf("assemble failed! (%s:%s)", v->shader->nir->info.name,
+                               v->shader->nir->info.label);
+               return false;
+       }
+
+       return true;
+}
+
 /*
  * For creating normal shader variants, 'nonbinning' is NULL.  For
 * creating a binning pass shader, it is a link to the corresponding normal
  * (non-binning) variant.
  */
 static struct ir3_shader_variant *
-create_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
+alloc_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
                struct ir3_shader_variant *nonbinning)
 {
-       struct ir3_shader_variant *v = CALLOC_STRUCT(ir3_shader_variant);
-       int ret;
+       void *mem_ctx = shader;
+       /* hang the binning variant off its non-binning counterpart instead
+        * of the shader, to simplify the error cleanup paths
+        */
+       if (nonbinning)
+               mem_ctx = nonbinning;
+       struct ir3_shader_variant *v = rzalloc_size(mem_ctx, sizeof(*v));
 
        if (!v)
                return NULL;
@@ -208,65 +219,99 @@ create_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
        v->nonbinning = nonbinning;
        v->key = *key;
        v->type = shader->type;
+       v->mergedregs = shader->compiler->gpu_id >= 600;
 
-       ret = ir3_compile_shader_nir(shader->compiler, v);
-       if (ret) {
-               debug_error("compile failed!");
+       if (!v->binning_pass)
+               v->const_state = rzalloc_size(v, sizeof(*v->const_state));
+
+       return v;
+}
+
+static bool
+needs_binning_variant(struct ir3_shader_variant *v)
+{
+       if ((v->type == MESA_SHADER_VERTEX) && ir3_has_binning_vs(&v->key))
+               return true;
+       return false;
+}
+
+static struct ir3_shader_variant *
+create_variant(struct ir3_shader *shader, const struct ir3_shader_key *key)
+{
+       struct ir3_shader_variant *v = alloc_variant(shader, key, NULL);
+
+       if (!v)
                goto fail;
+
+       if (needs_binning_variant(v)) {
+               v->binning = alloc_variant(shader, key, v);
+               if (!v->binning)
+                       goto fail;
        }
 
-       assemble_variant(v);
-       if (!v->bo) {
-               debug_error("assemble failed!");
-               goto fail;
+       if (ir3_disk_cache_retrieve(shader->compiler, v))
+               return v;
+
+       if (!shader->nir_finalized) {
+               ir3_nir_post_finalize(shader->compiler, shader->nir);
+
+               if (ir3_shader_debug & IR3_DBG_DISASM) {
+                       printf("dump nir%d: type=%d", shader->id, shader->type);
+                       nir_print_shader(shader->nir, stdout);
+               }
+
+               shader->nir_finalized = true;
        }
 
+       if (!compile_variant(v))
+               goto fail;
+
+       if (needs_binning_variant(v) && !compile_variant(v->binning))
+               goto fail;
+
+       ir3_disk_cache_store(shader->compiler, v);
+
        return v;
 
 fail:
-       delete_variant(v);
+       ralloc_free(v);
        return NULL;
 }
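
The ralloc parenting set up in alloc_variant() is what lets the fail: path
above clean up with a single call; a sketch of the resulting ownership tree
(illustrative only):

        /* shader                       freed in ir3_shader_destroy()
         *   `-- v                      draw variant
         *        |-- v->const_state
         *        `-- v->binning        binning variant, if any
         */
        ralloc_free(v);                    /* releases the whole subtree          */
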
 
 static inline struct ir3_shader_variant *
-shader_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
-               bool *created)
+shader_variant(struct ir3_shader *shader, const struct ir3_shader_key *key)
 {
        struct ir3_shader_variant *v;
 
-       *created = false;
-
        for (v = shader->variants; v; v = v->next)
                if (ir3_shader_key_equal(key, &v->key))
                        return v;
 
-       /* compile new variant if it doesn't exist already: */
-       v = create_variant(shader, key, NULL);
-       if (v) {
-               v->next = shader->variants;
-               shader->variants = v;
-               *created = true;
-       }
-
-       return v;
+       return NULL;
 }
 
 struct ir3_shader_variant *
-ir3_shader_get_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
+ir3_shader_get_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
                bool binning_pass, bool *created)
 {
        mtx_lock(&shader->variants_lock);
-       struct ir3_shader_variant *v =
-                       shader_variant(shader, key, created);
-
-       if (v && binning_pass) {
-               if (!v->binning) {
-                       v->binning = create_variant(shader, key, v);
+       struct ir3_shader_variant *v = shader_variant(shader, key);
+
+       if (!v) {
+               /* compile new variant if it doesn't exist already: */
+               v = create_variant(shader, key);
+               if (v) {
+                       v->next = shader->variants;
+                       shader->variants = v;
                        *created = true;
                }
-               mtx_unlock(&shader->variants_lock);
-               return v->binning;
        }
+
+       if (v && binning_pass) {
+               v = v->binning;
+               assert(v);
+       }
+
        mtx_unlock(&shader->variants_lock);
 
        return v;
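
A minimal caller-side sketch of the reworked lookup (hypothetical variable
names; the binning request assumes a vertex shader whose key actually wants a
binning variant, since both variants are now compiled together by
create_variant()):

        struct ir3_shader_key key = {0};
        bool created = false;
        struct ir3_shader_variant *vs =
                ir3_shader_get_variant(shader, &key, false, &created);
        struct ir3_shader_variant *bs =
                ir3_shader_get_variant(shader, &key, true, &created);
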
@@ -275,106 +320,162 @@ ir3_shader_get_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
 void
 ir3_shader_destroy(struct ir3_shader *shader)
 {
-       struct ir3_shader_variant *v, *t;
-       for (v = shader->variants; v; ) {
-               t = v;
-               v = v->next;
-               delete_variant(t);
-       }
-       free(shader->const_state.immediates);
        ralloc_free(shader->nir);
        mtx_destroy(&shader->variants_lock);
-       free(shader);
+       ralloc_free(shader);
 }
 
-static bool
-lower_output_var(nir_shader *nir, int location)
+/**
+ * Creates a bitmask of the shader key bits used by this particular shader.
+ * Used by the gallium driver to skip state-dependent recompiles when
+ * possible.
+ */
+static void
+ir3_setup_used_key(struct ir3_shader *shader)
 {
-       nir_foreach_variable (var, &nir->outputs) {
-               if (var->data.driver_location == location &&
-                               ((var->data.precision == GLSL_PRECISION_MEDIUM) ||
-                                       (var->data.precision == GLSL_PRECISION_LOW))) {
-                       if (glsl_get_base_type(var->type) == GLSL_TYPE_FLOAT)
-                               var->type = glsl_float16_type(var->type);
-
-                       return glsl_get_base_type(var->type) == GLSL_TYPE_FLOAT16;
-               }
-       }
+       nir_shader *nir = shader->nir;
+       struct shader_info *info = &nir->info;
+       struct ir3_shader_key *key = &shader->key_mask;
 
-       return false;
-}
+       /* This key flag is just used to make for a cheaper ir3_shader_key_equal
+        * check in the common case.
+        */
+       key->has_per_samp = true;
 
-static void
-lower_mediump_outputs(nir_shader *nir)
-{
-       nir_function_impl *impl = nir_shader_get_entrypoint(nir);
-       assert(impl);
+       key->safe_constlen = true;
 
-       /* Get rid of old derefs before we change the types of the variables */
-       nir_opt_dce(nir);
+       key->ucp_enables = 0xff;
 
-       nir_builder b;
-       nir_builder_init(&b, impl);
+       if (info->stage == MESA_SHADER_FRAGMENT) {
+               key->fsaturate_s = ~0;
+               key->fsaturate_t = ~0;
+               key->fsaturate_r = ~0;
+               key->fastc_srgb = ~0;
+               key->fsamples = ~0;
 
-       nir_foreach_block_safe (block, impl) {
-               nir_foreach_instr_safe (instr, block) {
-                       if (instr->type != nir_instr_type_intrinsic)
-                               continue;
+               if (info->inputs_read & VARYING_BITS_COLOR) {
+                       key->rasterflat = true;
+                       key->color_two_side = true;
+               }
 
-                       nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-                       if (intr->intrinsic != nir_intrinsic_store_output)
-                               continue;
+               if (info->inputs_read & VARYING_BIT_LAYER) {
+                       key->layer_zero = true;
+               }
 
-                       if (!lower_output_var(nir, nir_intrinsic_base(intr)))
-                               continue;
+               if ((info->outputs_written & ~(FRAG_RESULT_DEPTH |
+                                                               FRAG_RESULT_STENCIL |
+                                                               FRAG_RESULT_SAMPLE_MASK)) != 0) {
+                       key->fclamp_color = true;
+               }
 
-                       b.cursor = nir_before_instr(&intr->instr);
-                       nir_instr_rewrite_src(&intr->instr, &intr->src[0],
-                                       nir_src_for_ssa(nir_f2f16(&b, intr->src[0].ssa)));
+               /* Only used for deciding on behavior of
+                * nir_intrinsic_load_barycentric_sample
+                */
+               key->msaa = info->fs.uses_sample_qualifier;
+       } else {
+               key->tessellation = ~0;
+               key->has_gs = true;
+
+               if (info->outputs_written & VARYING_BITS_COLOR)
+                       key->vclamp_color = true;
+
+               if (info->stage == MESA_SHADER_VERTEX) {
+                       key->vsaturate_s = ~0;
+                       key->vsaturate_t = ~0;
+                       key->vsaturate_r = ~0;
+                       key->vastc_srgb = ~0;
+                       key->vsamples = ~0;
                }
        }
 }
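
The intent is that the state tracker can AND an incoming key against
shader->key_mask before comparing keys, so state the shader never reads cannot
force a recompile. A minimal sketch under that assumption (mask_key is a
hypothetical helper, not the driver's actual code, and only a few fields are
shown):

        static void
        mask_key(struct ir3_shader_key *key, const struct ir3_shader_key *used)
        {
                key->fsaturate_s &= used->fsaturate_s;
                key->fsaturate_t &= used->fsaturate_t;
                key->fsaturate_r &= used->fsaturate_r;
                key->ucp_enables &= used->ucp_enables;
                key->msaa        &= used->msaa;
        }
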
 
-struct ir3_shader *
-ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir)
-{
-       struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader);
 
-       mtx_init(&shader->variants_lock, mtx_plain);
-       shader->compiler = compiler;
-       shader->id = p_atomic_inc_return(&shader->compiler->shader_count);
-       shader->type = nir->info.stage;
+/* Given an array of constlens, decrease some of them so that the sum stays
+ * within "combined_limit" while trying to fairly share the reduction. Returns
+ * a bitfield of which stages should be trimmed.
+ */
+static uint32_t
+trim_constlens(unsigned *constlens,
+                          unsigned first_stage, unsigned last_stage,
+                          unsigned combined_limit, unsigned safe_limit)
+{
+   unsigned cur_total = 0;
+   for (unsigned i = first_stage; i <= last_stage; i++) {
+      cur_total += constlens[i];
+   }
+
+   unsigned max_stage = 0;
+   unsigned max_const = 0;
+   uint32_t trimmed = 0;
+
+   while (cur_total > combined_limit) {
+          for (unsigned i = first_stage; i <= last_stage; i++) {
+                  if (constlens[i] >= max_const) {
+                          max_stage = i;
+                          max_const = constlens[i];
+                  }
+          }
+
+          assert(max_const > safe_limit);
+          trimmed |= 1 << max_stage;
+          cur_total = cur_total - max_const + safe_limit;
+          constlens[max_stage] = safe_limit;
+   }
+
+   return trimmed;
+}
 
-       NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
-                          (nir_lower_io_options)0);
+/* Figures out which stages in the pipeline to use the "safe" constlen for, in
+ * order to satisfy all shared constlen limits.
+ */
+uint32_t
+ir3_trim_constlen(struct ir3_shader_variant **variants,
+                                 const struct ir3_compiler *compiler)
+{
+       unsigned constlens[MESA_SHADER_STAGES] = {};
 
-       if (compiler->gpu_id >= 600 &&
-                       nir->info.stage == MESA_SHADER_FRAGMENT &&
-                       !(ir3_shader_debug & IR3_DBG_NOFP16))
-               lower_mediump_outputs(nir);
+       for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+               if (variants[i])
+                       constlens[i] = variants[i]->constlen;
+       }
 
-       if (nir->info.stage == MESA_SHADER_FRAGMENT) {
-               /* NOTE: lower load_barycentric_at_sample first, since it
-                * produces load_barycentric_at_offset:
-                */
-               NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
-               NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);
+       uint32_t trimmed = 0;
+       STATIC_ASSERT(MESA_SHADER_STAGES <= 8 * sizeof(trimmed));
 
-               NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
+       /* There are two shared limits to take into account, the geometry limit on
+        * a6xx and the total limit. The frag limit on a6xx only matters for a
+        * single stage, so it's always satisfied with the first variant.
+        */
+       if (compiler->gpu_id >= 600) {
+               trimmed |=
+                       trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_GEOMETRY,
+                                                  compiler->max_const_geom, compiler->max_const_safe);
        }
+       trimmed |=
+               trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_FRAGMENT,
+                                          compiler->max_const_pipeline, compiler->max_const_safe);
 
-       NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
-
-       NIR_PASS_V(nir, nir_lower_amul, ir3_glsl_type_size);
+       return trimmed;
+}
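
A worked example of the trimming (the numbers are made up for illustration;
the real limits live in struct ir3_compiler): suppose max_const_pipeline = 512,
max_const_safe = 128, and the linked variants have constlen VS = 256 and
FS = 320, i.e. a total of 576 > 512. trim_constlens() picks the largest stage,
charges it at the safe size (256 + 128 = 384 <= 512), and reports it:

        unsigned constlens[MESA_SHADER_STAGES] = {
                [MESA_SHADER_VERTEX]   = 256,
                [MESA_SHADER_FRAGMENT] = 320,
        };
        uint32_t trimmed = trim_constlens(constlens, MESA_SHADER_VERTEX,
                        MESA_SHADER_FRAGMENT, 512, 128);
        /* trimmed == (1 << MESA_SHADER_FRAGMENT); the caller recompiles that
         * stage with key.safe_constlen set.
         */
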
 
-       /* do first pass optimization, ignoring the key: */
-       ir3_optimize_nir(shader, nir, NULL);
+struct ir3_shader *
+ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
+               unsigned reserved_user_consts, struct ir3_stream_output_info *stream_output)
+{
+       struct ir3_shader *shader = rzalloc_size(NULL, sizeof(*shader));
 
+       mtx_init(&shader->variants_lock, mtx_plain);
+       shader->compiler = compiler;
+       shader->id = p_atomic_inc_return(&shader->compiler->shader_count);
+       shader->type = nir->info.stage;
+       if (stream_output)
+               memcpy(&shader->stream_output, stream_output, sizeof(shader->stream_output));
+       shader->num_reserved_user_consts = reserved_user_consts;
        shader->nir = nir;
-       if (ir3_shader_debug & IR3_DBG_DISASM) {
-               printf("dump nir%d: type=%d", shader->id, shader->type);
-               nir_print_shader(shader->nir, stdout);
-       }
+
+       ir3_disk_cache_init_shader_key(compiler, shader);
+
+       ir3_setup_used_key(shader);
 
        return shader;
 }
@@ -436,7 +537,6 @@ ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
        uint8_t regid;
        unsigned i;
 
-       struct ir3_instruction *instr;
        foreach_input_n (instr, i, ir) {
                reg = instr->regs[0];
                regid = reg->num;
@@ -452,7 +552,7 @@ ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
        /* print pre-dispatch texture fetches: */
        for (i = 0; i < so->num_sampler_prefetch; i++) {
                const struct ir3_sampler_prefetch *fetch = &so->sampler_prefetch[i];
-               fprintf(out, "@tex(%sr%d.%c)\tsrc=%u, samp=%u, tex=%u, wrmask=%x, cmd=%u\n",
+               fprintf(out, "@tex(%sr%d.%c)\tsrc=%u, samp=%u, tex=%u, wrmask=0x%x, cmd=%u\n",
                                fetch->half_precision ? "h" : "",
                                fetch->dst >> 2, "xyzw"[fetch->dst & 0x3],
                                fetch->src, fetch->samp_id, fetch->tex_id,
@@ -470,7 +570,7 @@ ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
                fprintf(out, "\n");
        }
 
-       struct ir3_const_state *const_state = &so->shader->const_state;
+       const struct ir3_const_state *const_state = ir3_const_state(so);
        for (i = 0; i < const_state->immediates_count; i++) {
                fprintf(out, "@const(c%d.x)\t", const_state->offsets.immediate + i);
                fprintf(out, "0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
@@ -506,17 +606,28 @@ ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out)
        fprintf(out, "\n");
 
        /* print generic shader info: */
-       fprintf(out, "; %s prog %d/%d: %u instructions, %d half, %d full\n",
+       fprintf(out, "; %s prog %d/%d: %u instr, %u nops, %u non-nops, %u mov, %u cov, %u dwords\n",
                        type, so->shader->id, so->id,
                        so->info.instrs_count,
-                       so->info.max_half_reg + 1,
-                       so->info.max_reg + 1);
-
-       fprintf(out, "; %u constlen\n", so->constlen);
+                       so->info.nops_count,
+                       so->info.instrs_count - so->info.nops_count,
+                       so->info.mov_count, so->info.cov_count,
+                       so->info.sizedwords);
 
-       fprintf(out, "; %u (ss), %u (sy)\n", so->info.ss, so->info.sy);
+       fprintf(out, "; %s prog %d/%d: %u last-baryf, %d half, %d full, %u constlen\n",
+                       type, so->shader->id, so->id,
+                       so->info.last_baryf,
+                       so->info.max_half_reg + 1,
+                       so->info.max_reg + 1,
+                       so->constlen);
 
-       fprintf(out, "; max_sun=%u\n", ir->max_sun);
+       fprintf(out, "; %s prog %d/%d: %u sstall, %u (ss), %u (sy), %d max_sun, %d loops\n",
+                       type, so->shader->id, so->id,
+                       so->info.sstall,
+                       so->info.ss,
+                       so->info.sy,
+                       so->max_sun,
+                       so->loops);
 
        /* print shader type specific info: */
        switch (so->type) {