#include "util/u_atomic.h"
#include "util/u_string.h"
+#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
return glsl_count_attribute_slots(type, false);
}
-static void
-delete_variant(struct ir3_shader_variant *v)
-{
- if (v->ir)
- ir3_destroy(v->ir);
- assert(!v->bo);
- if (v->binning)
- delete_variant(v->binning);
- free(v->bin);
- free(v);
-}
-
/* for vertex shader, the inputs are loaded into registers before the shader
* is executed, so max_regs from the shader instructions might not properly
* reflect the # of registers actually used, especially in case passthrough
*/
v->constlen = MAX2(v->constlen, v->info.max_const + 1);
+ /* On a4xx and newer, constlen must be a multiple of 16 dwords even though
+ * uploads are in units of 4 dwords. Round it up here to make calculations
+ * regarding the shared constlen simpler.
+ */
+ if (gpu_id >= 400)
+ v->constlen = align(v->constlen, 4);
+
fixup_regfootprint(v);
return bin;
v->ir = NULL;
}
+/* Compile the variant's NIR into ir3 and assemble the machine binary
+ * (v->bin).  Returns false, after logging a debug error, if either the
+ * compile or the assemble step fails; the caller owns cleanup of v.
+ */
+static bool
+compile_variant(struct ir3_shader_variant *v)
+{
+ int ret = ir3_compile_shader_nir(v->shader->compiler, v);
+ if (ret) {
+ debug_error("compile failed!");
+ return false;
+ }
+
+ assemble_variant(v);
+ if (!v->bin) {
+ debug_error("assemble failed!");
+ return false;
+ }
+
+ return true;
+}
+
/*
* For creating normal shader variants, 'nonbinning' is NULL. For
* creating binning pass shader, it is link to corresponding normal
* (non-binning) variant.
*/
static struct ir3_shader_variant *
-create_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
+alloc_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
struct ir3_shader_variant *nonbinning)
{
- struct ir3_shader_variant *v = CALLOC_STRUCT(ir3_shader_variant);
- int ret;
+ void *mem_ctx = shader;
+ /* hang the binning variant off its non-binning counterpart instead
+ * of the shader, to simplify the error cleanup paths
+ */
+ if (nonbinning)
+ mem_ctx = nonbinning;
+ struct ir3_shader_variant *v = rzalloc_size(mem_ctx, sizeof(*v));
if (!v)
return NULL;
v->nonbinning = nonbinning;
v->key = *key;
v->type = shader->type;
+ v->mergedregs = shader->compiler->gpu_id >= 600;
+ /* NOTE(review): this makes mergedregs unconditional on a6xx+, dropping
+ * the old per-stage logic that disabled it for tess stages and for
+ * VS/GS when tessellation was in the key -- confirm that case is
+ * handled elsewhere now (e.g. in the key or the backend).
+ */
- if (shader->compiler->gpu_id >= 600) {
- switch (v->type) {
- case MESA_SHADER_TESS_CTRL:
- case MESA_SHADER_TESS_EVAL:
- v->mergedregs = false;
- break;
- case MESA_SHADER_VERTEX:
- case MESA_SHADER_GEOMETRY:
- /* For VS/GS, normally do mergedregs, but if there is tess
- * we need to not used MERGEDREGS
- */
- v->mergedregs = !key->tessellation;
- break;
- default:
- v->mergedregs = true;
+ if (!v->binning_pass)
+ v->const_state = rzalloc_size(v, sizeof(*v->const_state));
+
+ return v;
+}
+
+/* A separate binning-pass variant is only required for vertex shaders
+ * whose key is compatible with a binning VS.
+ */
+static bool
+needs_binning_variant(struct ir3_shader_variant *v)
+{
+ return (v->type == MESA_SHADER_VERTEX) && ir3_has_binning_vs(&v->key);
+}
+
+/* Allocate, compile and assemble a new variant (plus its binning
+ * counterpart when needed), consulting the disk cache first.  The
+ * variant is ralloc-parented to the shader and the binning variant to
+ * the non-binning one, so on failure a single ralloc_free(v) releases
+ * everything allocated here.
+ */
+static struct ir3_shader_variant *
+create_variant(struct ir3_shader *shader, const struct ir3_shader_key *key)
+{
+ struct ir3_shader_variant *v = alloc_variant(shader, key, NULL);
+
+ if (!v)
+ goto fail;
+
+ if (needs_binning_variant(v)) {
+ v->binning = alloc_variant(shader, key, v);
+ if (!v->binning)
+ goto fail;
+ }
+
+ if (ir3_disk_cache_retrieve(shader->compiler, v))
+ return v;
+
+ /* Key-independent NIR finalization runs once per shader, on first
+ * variant compile:
+ */
+ if (!shader->nir_finalized) {
+ ir3_nir_post_finalize(shader->compiler, shader->nir);
+
+ if (ir3_shader_debug & IR3_DBG_DISASM) {
+ printf("dump nir%d: type=%d", shader->id, shader->type);
+ nir_print_shader(shader->nir, stdout);
}
- } else {
- v->mergedregs = false;
+
+ shader->nir_finalized = true;
}
- ret = ir3_compile_shader_nir(shader->compiler, v);
- if (ret) {
- debug_error("compile failed!");
+ if (!compile_variant(v))
goto fail;
- }
- assemble_variant(v);
- if (!v->bin) {
- debug_error("assemble failed!");
+ if (needs_binning_variant(v) && !compile_variant(v->binning))
goto fail;
- }
+
+ ir3_disk_cache_store(shader->compiler, v);
return v;
fail:
- delete_variant(v);
+ ralloc_free(v);
return NULL;
}
+/* Look up an already-compiled variant matching key in the shader's
+ * variant list; returns NULL when none exists (the caller decides
+ * whether to compile one).  Callers are expected to hold
+ * shader->variants_lock.
+ */
static inline struct ir3_shader_variant *
-shader_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
- bool *created)
+shader_variant(struct ir3_shader *shader, const struct ir3_shader_key *key)
{
struct ir3_shader_variant *v;
- *created = false;
-
for (v = shader->variants; v; v = v->next)
if (ir3_shader_key_equal(key, &v->key))
return v;
- /* compile new variant if it doesn't exist already: */
- v = create_variant(shader, key, NULL);
- if (v) {
- v->next = shader->variants;
- shader->variants = v;
- *created = true;
- }
-
- return v;
+ return NULL;
}
struct ir3_shader_variant *
-ir3_shader_get_variant(struct ir3_shader *shader, struct ir3_shader_key *key,
+ir3_shader_get_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
bool binning_pass, bool *created)
{
+ /* NOTE(review): *created is now only written (set to true) when a new
+ * variant is compiled below -- the old code cleared it up front.
+ * Callers must initialize it to false themselves; confirm they all do.
+ */
mtx_lock(&shader->variants_lock);
- struct ir3_shader_variant *v =
- shader_variant(shader, key, created);
-
- if (v && binning_pass) {
- if (!v->binning) {
- v->binning = create_variant(shader, key, v);
+ struct ir3_shader_variant *v = shader_variant(shader, key);
+
+ if (!v) {
+ /* compile new variant if it doesn't exist already: */
+ v = create_variant(shader, key);
+ if (v) {
+ v->next = shader->variants;
+ shader->variants = v;
*created = true;
}
- mtx_unlock(&shader->variants_lock);
- return v->binning;
}
+
+ /* the binning variant is created together with its non-binning
+ * counterpart, so it must exist whenever the main variant does:
+ */
+ if (v && binning_pass) {
+ v = v->binning;
+ assert(v);
+ }
+
mtx_unlock(&shader->variants_lock);
return v;
+/* Tear down a shader and everything allocated off of it. */
void
ir3_shader_destroy(struct ir3_shader *shader)
{
- struct ir3_shader_variant *v, *t;
- for (v = shader->variants; v; ) {
- t = v;
- v = v->next;
- delete_variant(t);
- }
- free(shader->const_state.immediates);
ralloc_free(shader->nir);
mtx_destroy(&shader->variants_lock);
- free(shader);
+ /* variants (and their const_state / binning variants) are
+ * ralloc-parented to the shader, so this frees them too:
+ */
+ ralloc_free(shader);
}
/**
*/
key->has_per_samp = true;
+ key->safe_constlen = true;
+
+ key->ucp_enables = 0xff;
+
if (info->stage == MESA_SHADER_FRAGMENT) {
key->fsaturate_s = ~0;
key->fsaturate_t = ~0;
key->color_two_side = true;
}
+ if (info->inputs_read & VARYING_BIT_LAYER) {
+ key->layer_zero = true;
+ }
+
if ((info->outputs_written & ~(FRAG_RESULT_DEPTH |
FRAG_RESULT_STENCIL |
FRAG_RESULT_SAMPLE_MASK)) != 0) {
}
}
+
+/* Given an array of constlen's, decrease some of them so that the sum stays
+ * within "combined_limit" while trying to fairly share the reduction. Returns
+ * a bitfield of which stages should be trimmed.
+ */
+/* Given an array of constlen's, decrease some of them so that the sum stays
+ * within "combined_limit" while trying to fairly share the reduction. Returns
+ * a bitfield of which stages should be trimmed; each trimmed stage's entry
+ * in constlens is reduced to "safe_limit".
+ */
+static uint32_t
+trim_constlens(unsigned *constlens,
+ unsigned first_stage, unsigned last_stage,
+ unsigned combined_limit, unsigned safe_limit)
+{
+ unsigned cur_total = 0;
+ for (unsigned i = first_stage; i <= last_stage; i++) {
+ cur_total += constlens[i];
+ }
+
+ uint32_t trimmed = 0;
+
+ while (cur_total > combined_limit) {
+ /* Re-find the currently-largest stage each iteration.  max_stage and
+ * max_const must be reset here: a stale maximum carried over from a
+ * previous iteration could re-select an already-trimmed stage,
+ * double-count its reduction in cur_total, and exit the loop with the
+ * combined limit still exceeded.
+ */
+ unsigned max_stage = 0;
+ unsigned max_const = 0;
+ for (unsigned i = first_stage; i <= last_stage; i++) {
+ if (constlens[i] >= max_const) {
+ max_stage = i;
+ max_const = constlens[i];
+ }
+ }
+
+ /* progress is only possible if the worst stage can actually shrink */
+ assert(max_const > safe_limit);
+ trimmed |= 1 << max_stage;
+ cur_total = cur_total - max_const + safe_limit;
+ constlens[max_stage] = safe_limit;
+ }
+
+ return trimmed;
+}
+
+/* Figures out which stages in the pipeline to use the "safe" constlen for, in
+ * order to satisfy all shared constlen limits.
+ */
+/* "variants" is indexed by gl_shader_stage and may contain NULL entries
+ * (unused stages contribute a constlen of 0).  The returned bitmask has
+ * bit i set when stage i must be recompiled with the safe constlen.
+ */
+uint32_t
+ir3_trim_constlen(struct ir3_shader_variant **variants,
+ const struct ir3_compiler *compiler)
+{
+ unsigned constlens[MESA_SHADER_STAGES] = {};
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (variants[i])
+ constlens[i] = variants[i]->constlen;
+ }
+
+ uint32_t trimmed = 0;
+ STATIC_ASSERT(MESA_SHADER_STAGES <= 8 * sizeof(trimmed));
+
+ /* There are two shared limits to take into account, the geometry limit on
+ * a6xx and the total limit. The frag limit on a6xx only matters for a
+ * single stage, so it's always satisfied with the first variant.
+ */
+ if (compiler->gpu_id >= 600) {
+ trimmed |=
+ trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_GEOMETRY,
+ compiler->max_const_geom, compiler->max_const_safe);
+ }
+ trimmed |=
+ trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_FRAGMENT,
+ compiler->max_const_pipeline, compiler->max_const_safe);
+
+ return trimmed;
+}
+
struct ir3_shader *
ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
unsigned reserved_user_consts, struct ir3_stream_output_info *stream_output)
{
- struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader);
+ struct ir3_shader *shader = rzalloc_size(NULL, sizeof(*shader));
mtx_init(&shader->variants_lock, mtx_plain);
shader->compiler = compiler;
shader->type = nir->info.stage;
if (stream_output)
memcpy(&shader->stream_output, stream_output, sizeof(shader->stream_output));
- shader->const_state.num_reserved_user_consts = reserved_user_consts;
-
- if (nir->info.stage == MESA_SHADER_GEOMETRY)
- NIR_PASS_V(nir, ir3_nir_lower_gs);
-
- NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
- (nir_lower_io_options)0);
-
- if (compiler->gpu_id >= 600 &&
- nir->info.stage == MESA_SHADER_FRAGMENT &&
- !(ir3_shader_debug & IR3_DBG_NOFP16))
- NIR_PASS_V(nir, nir_lower_mediump_outputs);
-
- if (nir->info.stage == MESA_SHADER_FRAGMENT) {
- /* NOTE: lower load_barycentric_at_sample first, since it
- * produces load_barycentric_at_offset:
- */
- NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
- NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);
-
- NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
- }
-
- NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
-
- NIR_PASS_V(nir, nir_lower_amul, ir3_glsl_type_size);
-
- /* do first pass optimization, ignoring the key: */
- ir3_optimize_nir(shader, nir, NULL);
-
+ shader->num_reserved_user_consts = reserved_user_consts;
shader->nir = nir;
- if (ir3_shader_debug & IR3_DBG_DISASM) {
- printf("dump nir%d: type=%d", shader->id, shader->type);
- nir_print_shader(shader->nir, stdout);
- }
+
+ ir3_disk_cache_init_shader_key(compiler, shader);
ir3_setup_used_key(shader);