X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Ffreedreno%2Fir3%2Fir3_shader.c;h=984ae57418daf5397af96177a273773713fc682e;hb=36a90468486252cb35cdea3d67ace991f1fb4bd0;hp=66f72c98dc1f4a8939b29b23db7757ca533b4e62;hpb=00926954c30aabbb67fe839924e433bd30001c9d;p=mesa.git

diff --git a/src/freedreno/ir3/ir3_shader.c b/src/freedreno/ir3/ir3_shader.c
index 66f72c98dc1..984ae57418d 100644
--- a/src/freedreno/ir3/ir3_shader.c
+++ b/src/freedreno/ir3/ir3_shader.c
@@ -26,6 +26,7 @@
 
 #include "util/u_atomic.h"
 #include "util/u_string.h"
+#include "util/u_math.h"
 #include "util/u_memory.h"
 #include "util/format/u_format.h"
 
@@ -140,6 +141,13 @@ void * ir3_shader_assemble(struct ir3_shader_variant *v)
 	 */
 	v->constlen = MAX2(v->constlen, v->info.max_const + 1);
 
+	/* On a4xx and newer, constlen must be a multiple of 16 dwords even though
+	 * uploads are in units of 4 dwords. Round it up here to make calculations
+	 * regarding the shared constlen simpler.
+	 */
+	if (gpu_id >= 400)
+		v->constlen = align(v->constlen, 4);
+
 	fixup_regfootprint(v);
 
 	return bin;
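For context (an illustrative sketch, not part of the patch): constlen is counted in vec4 units, so rounding it up to a multiple of 4 vec4s is what makes it a multiple of 16 dwords. The align() used above comes from the newly included util/u_math.h; the standalone example below uses a local equivalent, align_up(), and made-up numbers.

#include <stdio.h>

/* local stand-in for util/u_math.h's align(): round v up to a multiple of a
 * (a must be a power of two) */
static unsigned
align_up(unsigned v, unsigned a)
{
	return (v + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned constlen = 71;                   /* vec4 units, e.g. max_const + 1 */
	unsigned rounded = align_up(constlen, 4); /* 72 vec4s */

	/* 72 vec4s = 288 dwords, a multiple of 16 dwords */
	printf("%u vec4s -> %u vec4s = %u dwords\n", constlen, rounded, rounded * 4);
	return 0;
}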
@@ -163,17 +171,40 @@ assemble_variant(struct ir3_shader_variant *v)
 	v->ir = NULL;
 }
 
+static bool
+compile_variant(struct ir3_shader_variant *v)
+{
+	int ret = ir3_compile_shader_nir(v->shader->compiler, v);
+	if (ret) {
+		debug_error("compile failed!");
+		return false;
+	}
+
+	assemble_variant(v);
+	if (!v->bin) {
+		debug_error("assemble failed!");
+		return false;
+	}
+
+	return true;
+}
+
 /*
  * For creating normal shader variants, 'nonbinning' is NULL. For
  * creating binning pass shader, it is link to corresponding normal
  * (non-binning) variant.
  */
 static struct ir3_shader_variant *
-create_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
+alloc_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
 		struct ir3_shader_variant *nonbinning)
 {
-	struct ir3_shader_variant *v = rzalloc_size(shader, sizeof(*v));
-	int ret;
+	void *mem_ctx = shader;
+	/* hang the binning variant off its non-binning counterpart instead
+	 * of the shader, to simplify the error cleanup paths
+	 */
+	if (nonbinning)
+		mem_ctx = nonbinning;
+	struct ir3_shader_variant *v = rzalloc_size(mem_ctx, sizeof(*v));
 
 	if (!v)
 		return NULL;
@@ -184,38 +215,57 @@ create_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
 	v->nonbinning = nonbinning;
 	v->key = *key;
 	v->type = shader->type;
+	v->mergedregs = shader->compiler->gpu_id >= 600;
 
-	if (shader->compiler->gpu_id >= 600) {
-		switch (v->type) {
-		case MESA_SHADER_TESS_CTRL:
-		case MESA_SHADER_TESS_EVAL:
-			v->mergedregs = false;
-			break;
-		case MESA_SHADER_VERTEX:
-		case MESA_SHADER_GEOMETRY:
-			/* For VS/GS, normally do mergedregs, but if there is tess
-			 * we need to not used MERGEDREGS
-			 */
-			v->mergedregs = !key->tessellation;
-			break;
-		default:
-			v->mergedregs = true;
+	if (!v->binning_pass)
+		v->const_state = rzalloc_size(v, sizeof(*v->const_state));
+
+	return v;
+}
+
+static bool
+needs_binning_variant(struct ir3_shader_variant *v)
+{
+	if ((v->type == MESA_SHADER_VERTEX) && ir3_has_binning_vs(&v->key))
+		return true;
+	return false;
+}
+
+static struct ir3_shader_variant *
+create_variant(struct ir3_shader *shader, const struct ir3_shader_key *key)
+{
+	struct ir3_shader_variant *v = alloc_variant(shader, key, NULL);
+
+	if (!v)
+		goto fail;
+
+	if (needs_binning_variant(v)) {
+		v->binning = alloc_variant(shader, key, v);
+		if (!v->binning)
+			goto fail;
+	}
+
+	if (ir3_disk_cache_retrieve(shader->compiler, v))
+		return v;
+
+	if (!shader->nir_finalized) {
+		ir3_nir_post_finalize(shader->compiler, shader->nir);
+
+		if (ir3_shader_debug & IR3_DBG_DISASM) {
+			printf("dump nir%d: type=%d", shader->id, shader->type);
+			nir_print_shader(shader->nir, stdout);
 		}
-	} else {
-		v->mergedregs = false;
+
+		shader->nir_finalized = true;
 	}
 
-	ret = ir3_compile_shader_nir(shader->compiler, v);
-	if (ret) {
-		debug_error("compile failed!");
+	if (!compile_variant(v))
 		goto fail;
-	}
 
-	assemble_variant(v);
-	if (!v->bin) {
-		debug_error("assemble failed!");
+	if (needs_binning_variant(v) && !compile_variant(v->binning))
 		goto fail;
-	}
+
+	ir3_disk_cache_store(shader->compiler, v);
 
 	return v;
 
@@ -225,26 +275,15 @@ fail:
 }
 
 static inline struct ir3_shader_variant *
-shader_variant(struct ir3_shader *shader, const struct ir3_shader_key *key,
-		bool *created)
+shader_variant(struct ir3_shader *shader, const struct ir3_shader_key *key)
 {
 	struct ir3_shader_variant *v;
 
-	*created = false;
-
 	for (v = shader->variants; v; v = v->next)
 		if (ir3_shader_key_equal(key, &v->key))
 			return v;
 
-	/* compile new variant if it doesn't exist already: */
-	v = create_variant(shader, key, NULL);
-	if (v) {
-		v->next = shader->variants;
-		shader->variants = v;
-		*created = true;
-	}
-
-	return v;
+	return NULL;
 }
 
 struct ir3_shader_variant *
@@ -252,17 +291,23 @@ ir3_shader_get_variant(struct ir3_shader *shader, const struct ir3_shader_key *k
 		bool binning_pass, bool *created)
 {
 	mtx_lock(&shader->variants_lock);
-	struct ir3_shader_variant *v =
-		shader_variant(shader, key, created);
-
-	if (v && binning_pass) {
-		if (!v->binning) {
-			v->binning = create_variant(shader, key, v);
+	struct ir3_shader_variant *v = shader_variant(shader, key);
+
+	if (!v) {
+		/* compile new variant if it doesn't exist already: */
+		v = create_variant(shader, key);
+		if (v) {
+			v->next = shader->variants;
+			shader->variants = v;
 			*created = true;
 		}
-		mtx_unlock(&shader->variants_lock);
-		return v->binning;
 	}
+
+	if (v && binning_pass) {
+		v = v->binning;
+		assert(v);
+	}
+
 	mtx_unlock(&shader->variants_lock);
 
 	return v;
@@ -293,6 +338,10 @@ ir3_setup_used_key(struct ir3_shader *shader)
 	 */
 	key->has_per_samp = true;
 
+	key->safe_constlen = true;
+
+	key->ucp_enables = 0xff;
+
 	if (info->stage == MESA_SHADER_FRAGMENT) {
 		key->fsaturate_s = ~0;
 		key->fsaturate_t = ~0;
@@ -332,6 +381,75 @@
 	}
 }
 
+
+/* Given an array of constlens, decrease some of them so that the sum stays
+ * within "combined_limit" while trying to fairly share the reduction. Returns
+ * a bitfield of which stages should be trimmed.
+ */
+static uint32_t
+trim_constlens(unsigned *constlens,
+		unsigned first_stage, unsigned last_stage,
+		unsigned combined_limit, unsigned safe_limit)
+{
+	unsigned cur_total = 0;
+	for (unsigned i = first_stage; i <= last_stage; i++) {
+		cur_total += constlens[i];
+	}
+
+	unsigned max_stage = 0;
+	unsigned max_const = 0;
+	uint32_t trimmed = 0;
+
+	while (cur_total > combined_limit) {
+		for (unsigned i = first_stage; i <= last_stage; i++) {
+			if (constlens[i] >= max_const) {
+				max_stage = i;
+				max_const = constlens[i];
+			}
+		}
+
+		assert(max_const > safe_limit);
+		trimmed |= 1 << max_stage;
+		cur_total = cur_total - max_const + safe_limit;
+		constlens[max_stage] = safe_limit;
+	}
+
+	return trimmed;
+}
+
+/* Figures out which stages in the pipeline to use the "safe" constlen for, in
+ * order to satisfy all shared constlen limits.
+ */
+uint32_t
+ir3_trim_constlen(struct ir3_shader_variant **variants,
+		const struct ir3_compiler *compiler)
+{
+	unsigned constlens[MESA_SHADER_STAGES] = {};
+
+	for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+		if (variants[i])
+			constlens[i] = variants[i]->constlen;
+	}
+
+	uint32_t trimmed = 0;
+	STATIC_ASSERT(MESA_SHADER_STAGES <= 8 * sizeof(trimmed));
+
+	/* There are two shared limits to take into account, the geometry limit on
+	 * a6xx and the total limit. The frag limit on a6xx only matters for a
+	 * single stage, so it's always satisfied with the first variant.
+	 */
+	if (compiler->gpu_id >= 600) {
+		trimmed |=
+			trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_GEOMETRY,
+				compiler->max_const_geom, compiler->max_const_safe);
+	}
+	trimmed |=
+		trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_FRAGMENT,
+			compiler->max_const_pipeline, compiler->max_const_safe);
+
+	return trimmed;
+}
+
 struct ir3_shader *
 ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
 	unsigned reserved_user_consts, struct ir3_stream_output_info *stream_output)
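A worked illustration of the trimming policy above (not part of the patch): repeatedly pick the stage with the largest constlen and drop it to the safe limit until the combined total fits, recording each trimmed stage in a bitmask. The stage names, limits and constlen values below are invented for the example; the real limits live in the ir3_compiler fields max_const_geom, max_const_pipeline and max_const_safe.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { VS, HS, DS, GS, FS, STAGES };

/* same greedy policy as trim_constlens() above, with the per-iteration
 * max reset made explicit; returns a bitmask of trimmed stages */
static uint32_t
trim(unsigned *constlens, unsigned first, unsigned last,
		unsigned combined_limit, unsigned safe_limit)
{
	unsigned cur_total = 0;
	for (unsigned i = first; i <= last; i++)
		cur_total += constlens[i];

	uint32_t trimmed = 0;
	while (cur_total > combined_limit) {
		unsigned max_stage = first, max_const = 0;
		for (unsigned i = first; i <= last; i++) {
			if (constlens[i] >= max_const) {
				max_stage = i;
				max_const = constlens[i];
			}
		}
		assert(max_const > safe_limit);
		trimmed |= 1u << max_stage;
		cur_total = cur_total - max_const + safe_limit;
		constlens[max_stage] = safe_limit;
	}
	return trimmed;
}

int main(void)
{
	/* hypothetical constlens (vec4 units) and limits */
	unsigned constlens[STAGES] = { [VS] = 256, [FS] = 512 };

	/* 256 + 512 = 768 > 512, so FS (the largest) is trimmed to 128;
	 * the new total 384 <= 512 and the loop stops. */
	uint32_t t = trim(constlens, VS, FS, /*combined_limit=*/512, /*safe_limit=*/128);
	printf("trimmed mask: 0x%x (FS bit = 0x%x)\n", (unsigned)t, 1u << FS);
	return 0;
}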
@@ -345,41 +463,9 @@ ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
 	if (stream_output)
 		memcpy(&shader->stream_output, stream_output,
 			sizeof(shader->stream_output));
 	shader->num_reserved_user_consts = reserved_user_consts;
-	shader->const_state = rzalloc_size(shader, sizeof(*shader->const_state));
-
-	if (nir->info.stage == MESA_SHADER_GEOMETRY)
-		NIR_PASS_V(nir, ir3_nir_lower_gs);
-
-	NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
-			(nir_lower_io_options)0);
-
-	if (compiler->gpu_id >= 600 &&
-			nir->info.stage == MESA_SHADER_FRAGMENT &&
-			!(ir3_shader_debug & IR3_DBG_NOFP16))
-		NIR_PASS_V(nir, nir_lower_mediump_outputs);
-
-	if (nir->info.stage == MESA_SHADER_FRAGMENT) {
-		/* NOTE: lower load_barycentric_at_sample first, since it
-		 * produces load_barycentric_at_offset:
-		 */
-		NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_sample);
-		NIR_PASS_V(nir, ir3_nir_lower_load_barycentric_at_offset);
-
-		NIR_PASS_V(nir, ir3_nir_move_varying_inputs);
-	}
-
-	NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
-
-	NIR_PASS_V(nir, nir_lower_amul, ir3_glsl_type_size);
-
-	/* do first pass optimization, ignoring the key: */
-	ir3_optimize_nir(shader, nir);
-
 	shader->nir = nir;
-	if (ir3_shader_debug & IR3_DBG_DISASM) {
-		printf("dump nir%d: type=%d", shader->id, shader->type);
-		nir_print_shader(shader->nir, stdout);
-	}
+
+	ir3_disk_cache_init_shader_key(compiler, shader);
 
 	ir3_setup_used_key(shader);
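Roughly how a driver is expected to consume ir3_trim_constlen() together with the new safe_constlen key bit: gather the linked variants, ask which stages exceed the shared limits, then re-request those variants with safe_constlen set. This is a hypothetical caller-side sketch (apply_safe_constlen is not a function from this patch, and the toggling of the key is only one way to structure it), shown to make the contract of the returned bitmask concrete.

#include "ir3_shader.h"

/* hypothetical helper; 'shaders' and 'variants' are indexed by gl_shader_stage */
static void
apply_safe_constlen(struct ir3_shader **shaders,
		struct ir3_shader_variant **variants,
		struct ir3_shader_key *key,
		const struct ir3_compiler *compiler)
{
	uint32_t safe_mask = ir3_trim_constlen(variants, compiler);

	for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
		if (!(safe_mask & (1u << stage)))
			continue;

		/* re-fetch (or compile) the variant with the conservative
		 * constlen, so the combined allocation fits the hardware */
		bool created;
		key->safe_constlen = true;
		variants[stage] = ir3_shader_get_variant(shaders[stage], key,
				false /* binning_pass */, &created);
		key->safe_constlen = false;
	}
}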