#include "compiler/nir/nir_serialize.h"
#include "nir/tgsi_to_nir.h"
-#include "tgsi/tgsi_parse.h"
#include "util/hash_table.h"
#include "util/crc32.h"
#include "util/u_async_debug.h"
/* SHADER_CACHE */
/**
- * Return the IR binary in a buffer. For TGSI the first 4 bytes contain its
- * size as integer.
+ * Return the IR key for the shader cache.
*/
-void *si_get_ir_binary(struct si_shader_selector *sel)
+void si_get_ir_cache_key(struct si_shader_selector *sel, bool ngg, bool es,
+ unsigned char ir_sha1_cache_key[20])
{
- struct blob blob;
+ struct blob blob = {};
unsigned ir_size;
void *ir_binary;
- if (sel->tokens) {
- ir_binary = sel->tokens;
- ir_size = tgsi_num_tokens(sel->tokens) *
- sizeof(struct tgsi_token);
+ if (sel->nir_binary) {
+ ir_binary = sel->nir_binary;
+ ir_size = sel->nir_size;
} else {
assert(sel->nir);
blob_init(&blob);
- nir_serialize(&blob, sel->nir);
+ nir_serialize(&blob, sel->nir, true);
ir_binary = blob.data;
ir_size = blob.size;
}
- unsigned size = 4 + ir_size + sizeof(sel->so);
- char *result = (char*)MALLOC(size);
- if (!result)
- return NULL;
-
- *((uint32_t*)result) = size;
- memcpy(result + 4, ir_binary, ir_size);
- memcpy(result + 4 + ir_size, &sel->so, sizeof(sel->so));
+ /* These settings affect the compilation, but they are not derived
+ * from the input shader IR.
+ */
+ unsigned shader_variant_flags = 0;
+ if (ngg)
+ shader_variant_flags |= 1 << 0;
if (sel->nir)
+ shader_variant_flags |= 1 << 1;
+ if (si_get_wave_size(sel->screen, sel->type, ngg, es) == 32)
+ shader_variant_flags |= 1 << 2;
+ if (sel->force_correct_derivs_after_kill)
+ shader_variant_flags |= 1 << 3;
+
+ struct mesa_sha1 ctx;
+ _mesa_sha1_init(&ctx);
+ _mesa_sha1_update(&ctx, &shader_variant_flags, 4);
+ _mesa_sha1_update(&ctx, ir_binary, ir_size);
+ if (sel->type == PIPE_SHADER_VERTEX ||
+ sel->type == PIPE_SHADER_TESS_EVAL ||
+ sel->type == PIPE_SHADER_GEOMETRY)
+ _mesa_sha1_update(&ctx, &sel->so, sizeof(sel->so));
+ _mesa_sha1_final(&ctx, ir_sha1_cache_key);
+
+ if (ir_binary == blob.data)
blob_finish(&blob);
-
- return result;
}
/** Copy "data" to "ptr" and return the next dword following copied data. */
/**
* Insert a shader into the cache. It's assumed the shader is not in the cache.
* Use si_shader_cache_load_shader before calling this.
- *
- * Returns false on failure, in which case the ir_binary should be freed.
*/
-bool si_shader_cache_insert_shader(struct si_screen *sscreen, void *ir_binary,
+void si_shader_cache_insert_shader(struct si_screen *sscreen,
+ unsigned char ir_sha1_cache_key[20],
struct si_shader *shader,
bool insert_into_disk_cache)
{
struct hash_entry *entry;
uint8_t key[CACHE_KEY_SIZE];
- entry = _mesa_hash_table_search(sscreen->shader_cache, ir_binary);
+ entry = _mesa_hash_table_search(sscreen->shader_cache, ir_sha1_cache_key);
if (entry)
- return false; /* already added */
+ return; /* already added */
hw_binary = si_get_shader_binary(shader);
if (!hw_binary)
- return false;
+ return;
- if (_mesa_hash_table_insert(sscreen->shader_cache, ir_binary,
+ if (_mesa_hash_table_insert(sscreen->shader_cache,
+ mem_dup(ir_sha1_cache_key, 20),
hw_binary) == NULL) {
FREE(hw_binary);
- return false;
+ return;
}
if (sscreen->disk_shader_cache && insert_into_disk_cache) {
- disk_cache_compute_key(sscreen->disk_shader_cache, ir_binary,
- *((uint32_t *)ir_binary), key);
+ disk_cache_compute_key(sscreen->disk_shader_cache,
+ ir_sha1_cache_key, 20, key);
disk_cache_put(sscreen->disk_shader_cache, key, hw_binary,
*((uint32_t *) hw_binary), NULL);
}
-
- return true;
}
-bool si_shader_cache_load_shader(struct si_screen *sscreen, void *ir_binary,
+bool si_shader_cache_load_shader(struct si_screen *sscreen,
+ unsigned char ir_sha1_cache_key[20],
struct si_shader *shader)
{
struct hash_entry *entry =
- _mesa_hash_table_search(sscreen->shader_cache, ir_binary);
+ _mesa_hash_table_search(sscreen->shader_cache, ir_sha1_cache_key);
if (!entry) {
if (sscreen->disk_shader_cache) {
unsigned char sha1[CACHE_KEY_SIZE];
- size_t tg_size = *((uint32_t *) ir_binary);
disk_cache_compute_key(sscreen->disk_shader_cache,
- ir_binary, tg_size, sha1);
+ ir_sha1_cache_key, 20, sha1);
size_t binary_size;
uint8_t *buffer =
}
free(buffer);
- if (!si_shader_cache_insert_shader(sscreen, ir_binary,
- shader, false))
- FREE(ir_binary);
+ si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key,
+ shader, false);
} else {
return false;
}
} else {
- if (si_load_shader_binary(shader, entry->data))
- FREE(ir_binary);
- else
+ if (!si_load_shader_binary(shader, entry->data))
return false;
}
p_atomic_inc(&sscreen->num_shader_cache_hits);
static uint32_t si_shader_cache_key_hash(const void *key)
{
- /* The first dword is the key size. */
- return util_hash_crc32(key, *(uint32_t*)key);
+ /* Take the first dword of SHA1. */
+ return *(uint32_t*)key;
}
static bool si_shader_cache_key_equals(const void *a, const void *b)
{
- uint32_t *keya = (uint32_t*)a;
- uint32_t *keyb = (uint32_t*)b;
-
- /* The first dword is the key size. */
- if (*keya != *keyb)
- return false;
-
- return memcmp(keya, keyb, *keya) == 0;
+ /* Compare SHA1s. */
+ return memcmp(a, b, 20) == 0;
}
static void si_destroy_shader_cache_entry(struct hash_entry *entry)
bool si_init_shader_cache(struct si_screen *sscreen)
{
- (void) mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
+ (void) simple_mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
sscreen->shader_cache =
_mesa_hash_table_create(NULL,
si_shader_cache_key_hash,
if (sscreen->shader_cache)
_mesa_hash_table_destroy(sscreen->shader_cache,
si_destroy_shader_cache_entry);
- mtx_destroy(&sscreen->shader_cache_mutex);
+ simple_mtx_destroy(&sscreen->shader_cache_mutex);
}
/* SHADER STATES */
else
topology = V_028B6C_OUTPUT_TRIANGLE_CW;
- if (sscreen->has_distributed_tess) {
+ if (sscreen->info.has_distributed_tess) {
if (sscreen->info.family == CHIP_FIJI ||
sscreen->info.family >= CHIP_POLARIS10)
distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
return num_always_on_user_sgprs + 1;
}
+/* Return VGPR_COMP_CNT for the API vertex shader. This can be hw LS, LSHS, ES, ESGS, VS. */
+static unsigned si_get_vs_vgpr_comp_cnt(struct si_screen *sscreen,
+ struct si_shader *shader, bool legacy_vs_prim_id)
+{
+ assert(shader->selector->type == PIPE_SHADER_VERTEX ||
+ (shader->previous_stage_sel &&
+ shader->previous_stage_sel->type == PIPE_SHADER_VERTEX));
+
+ /* GFX6-9 LS (VertexID, RelAutoindex, InstanceID / StepRate0(==1), ...).
+ * GFX6-9 ES,VS (VertexID, InstanceID / StepRate0(==1), VSPrimID, ...)
+ * GFX10 LS (VertexID, RelAutoindex, UserVGPR1, InstanceID).
+ * GFX10 ES,VS (VertexID, UserVGPR0, UserVGPR1 or VSPrimID, UserVGPR2 or InstanceID)
+ */
+ bool is_ls = shader->selector->type == PIPE_SHADER_TESS_CTRL || shader->key.as_ls;
+
+ if (sscreen->info.chip_class >= GFX10 && shader->info.uses_instanceid)
+ return 3;
+ else if ((is_ls && shader->info.uses_instanceid) || legacy_vs_prim_id)
+ return 2;
+ else if (is_ls || shader->info.uses_instanceid)
+ return 1;
+ else
+ return 0;
+}
+
static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
{
struct si_pm4_state *pm4;
- unsigned vgpr_comp_cnt;
uint64_t va;
assert(sscreen->info.chip_class <= GFX8);
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
- /* We need at least 2 components for LS.
- * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
- * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
- */
- vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;
-
si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40));
shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
- S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
+ S_00B528_VGPR_COMP_CNT(si_get_vs_vgpr_comp_cnt(sscreen, shader, false)) |
S_00B528_DX10_CLAMP(1) |
S_00B528_FLOAT_MODE(shader->config.float_mode);
shader->config.rsrc2 = S_00B52C_USER_SGPR(si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR)) |
{
struct si_pm4_state *pm4;
uint64_t va;
- unsigned ls_vgpr_comp_cnt = 0;
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, S_00B414_MEM_BASE(va >> 40));
}
- /* We need at least 2 components for LS.
- * GFX9 VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
- * GFX10 VGPR0-3: (VertexID, RelAutoindex, UserVGPR1, InstanceID).
- * On gfx9, StepRate0 is set to 1 so that VGPR3 doesn't have to
- * be loaded.
- */
- ls_vgpr_comp_cnt = 1;
- if (shader->info.uses_instanceid) {
- if (sscreen->info.chip_class >= GFX10)
- ls_vgpr_comp_cnt = 3;
- else
- ls_vgpr_comp_cnt = 2;
- }
-
unsigned num_user_sgprs =
si_get_num_vs_user_sgprs(GFX9_TCS_NUM_USER_SGPR);
S_00B428_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
S_00B428_WGP_MODE(sscreen->info.chip_class >= GFX10) |
S_00B428_FLOAT_MODE(shader->config.float_mode) |
- S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt));
+ S_00B428_LS_VGPR_COMP_CNT(sscreen->info.chip_class >= GFX9 ?
+ si_get_vs_vgpr_comp_cnt(sscreen, shader, false) : 0));
if (sscreen->info.chip_class <= GFX8) {
si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
if (shader->selector->type == PIPE_SHADER_VERTEX) {
- /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
- vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
+ vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
num_user_sgprs = si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR);
} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
unsigned es_type = shader->key.part.gs.es->type;
unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
- if (es_type == PIPE_SHADER_VERTEX)
- /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
- es_vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
- else if (es_type == PIPE_SHADER_TESS_EVAL)
+ if (es_type == PIPE_SHADER_VERTEX) {
+ es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
+ } else if (es_type == PIPE_SHADER_TESS_EVAL)
es_vgpr_comp_cnt = shader->key.part.gs.es->info.uses_primid ? 3 : 2;
else
unreachable("invalid shader selector type");
SI_TRACKED_PA_CL_NGG_CNTL,
shader->ctx_reg.ngg.pa_cl_ngg_cntl);
+ radeon_opt_set_context_reg_rmw(sctx, R_02881C_PA_CL_VS_OUT_CNTL,
+ SI_TRACKED_PA_CL_VS_OUT_CNTL__VS,
+ shader->pa_cl_vs_out_cntl,
+ SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK);
+
if (initial_cdw != sctx->gfx_cs->current.cdw)
sctx->context_roll = true;
}
return PIPE_PRIM_TRIANGLES; /* worst case for all callers */
}
+static unsigned si_get_vs_out_cntl(const struct si_shader_selector *sel, bool ngg)
+{
+ bool misc_vec_ena =
+ sel->info.writes_psize || (sel->info.writes_edgeflag && !ngg) ||
+ sel->info.writes_layer || sel->info.writes_viewport_index;
+ return S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) |
+ S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag && !ngg) |
+ S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) |
+ S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) |
+ S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
+ S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena);
+}
+
/**
* Prepare the PM4 image for \p shader, which will run as a merged ESGS shader
* in NGG mode.
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
if (es_type == PIPE_SHADER_VERTEX) {
- /* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
- es_vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : 0;
+ es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
if (es_info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
shader->ctx_reg.ngg.vgt_primitiveid_en =
S_028A84_PRIMITIVEID_EN(es_enable_prim_id) |
- S_028A84_NGG_DISABLE_PROVOK_REUSE(es_enable_prim_id);
+ S_028A84_NGG_DISABLE_PROVOK_REUSE(shader->key.mono.u.vs_export_prim_id ||
+ gs_sel->info.writes_primid);
if (gs_type == PIPE_SHADER_GEOMETRY) {
shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = es_sel->esgs_itemsize / 4;
*/
shader->ctx_reg.ngg.pa_cl_ngg_cntl =
S_028838_INDEX_BUF_EDGE_FLAG_ENA(gs_type == PIPE_SHADER_VERTEX);
+ shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(gs_sel, true);
shader->ge_cntl =
S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) |
- S_03096C_VERT_GRP_SIZE(shader->ngg.hw_max_esverts) |
+ S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);
/* Bug workaround for a possible hang with non-tessellation cases.
SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,
shader->vgt_vertex_reuse_block_cntl);
+ /* Required programming for tessellation. (legacy pipeline only) */
+ if (sctx->chip_class == GFX10 &&
+ shader->selector->type == PIPE_SHADER_TESS_EVAL) {
+ radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
+ SI_TRACKED_VGT_GS_ONCHIP_CNTL,
+ S_028A44_ES_VERTS_PER_SUBGRP(250) |
+ S_028A44_GS_PRIMS_PER_SUBGRP(126) |
+ S_028A44_GS_INST_PRIMS_IN_SUBGRP(126));
+ }
+
+ if (sctx->chip_class >= GFX10) {
+ radeon_opt_set_context_reg_rmw(sctx, R_02881C_PA_CL_VS_OUT_CNTL,
+ SI_TRACKED_PA_CL_VS_OUT_CNTL__VS,
+ shader->pa_cl_vs_out_cntl,
+ SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK);
+ }
+
if (initial_cdw != sctx->gfx_cs->current.cdw)
sctx->context_roll = true;
}
vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
- if (sscreen->info.chip_class >= GFX10) {
- vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : (enable_prim_id ? 2 : 0);
- } else {
- /* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
- * If PrimID is disabled. InstanceID / StepRate1 is loaded instead.
- * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
- */
- vgpr_comp_cnt = enable_prim_id ? 2 : (shader->info.uses_instanceid ? 1 : 0);
- }
+ vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, enable_prim_id);
if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE);
+ shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(shader->selector, false);
oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;
if (sctx->tes_shader.cso)
key->as_ls = 1;
- else if (sctx->gs_shader.cso)
+ else if (sctx->gs_shader.cso) {
key->as_es = 1;
- else {
+ key->as_ngg = stages_key.u.ngg;
+ } else {
key->as_ngg = stages_key.u.ngg;
si_shader_selector_key_hw_vs(sctx, sel, key);
compiler = shader->compiler_ctx_state.compiler;
}
+ if (!compiler->passes)
+ si_init_compiler(sscreen, compiler);
+
if (unlikely(!si_shader_create(sscreen, compiler, shader, debug))) {
PRINT_ERR("Failed to build shader variant (type=%u)\n",
sel->type);
main_part->key.as_ngg = key->as_ngg;
main_part->is_monolithic = false;
- if (si_compile_tgsi_shader(sscreen, compiler_state->compiler,
+ if (si_compile_shader(sscreen, compiler_state->compiler,
main_part, &compiler_state->debug) != 0) {
FREE(main_part);
return false;
if (thread_index < 0)
util_queue_fence_wait(&sel->ready);
- mtx_lock(&sel->mutex);
+ simple_mtx_lock(&sel->mutex);
/* Find the shader variant. */
for (iter = sel->first_variant; iter; iter = iter->next_variant) {
/* Don't check the "current" shader. We checked it above. */
if (current != iter &&
memcmp(&iter->key, key, sizeof(*key)) == 0) {
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
/* If it's an optimized shader and its compilation has
/* Build a new shader. */
shader = CALLOC_STRUCT(si_shader);
if (!shader) {
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
return -ENOMEM;
}
if (previous_stage_sel) {
struct si_shader_key shader1_key = zeroed;
- if (sel->type == PIPE_SHADER_TESS_CTRL)
+ if (sel->type == PIPE_SHADER_TESS_CTRL) {
shader1_key.as_ls = 1;
- else if (sel->type == PIPE_SHADER_GEOMETRY)
+ } else if (sel->type == PIPE_SHADER_GEOMETRY) {
shader1_key.as_es = 1;
- else
+ shader1_key.as_ngg = key->as_ngg; /* for Wave32 vs Wave64 */
+ } else {
assert(0);
+ }
- if (sel->type == PIPE_SHADER_GEOMETRY &&
- previous_stage_sel->type == PIPE_SHADER_TESS_EVAL)
- shader1_key.as_ngg = key->as_ngg;
-
- mtx_lock(&previous_stage_sel->mutex);
+ simple_mtx_lock(&previous_stage_sel->mutex);
ok = si_check_missing_main_part(sscreen,
previous_stage_sel,
compiler_state, &shader1_key);
- mtx_unlock(&previous_stage_sel->mutex);
+ simple_mtx_unlock(&previous_stage_sel->mutex);
}
if (ok) {
if (!ok) {
FREE(shader);
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
return -ENOMEM; /* skip the draw call */
}
}
/* Compile it asynchronously. */
util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
shader, &shader->ready,
- si_build_shader_variant_low_priority, NULL);
+ si_build_shader_variant_low_priority, NULL,
+ 0);
/* Add only after the ready fence was reset, to guard against a
* race with si_bind_XX_shader. */
/* Use the default (unoptimized) shader for now. */
memset(&key->opt, 0, sizeof(key->opt));
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
if (sscreen->options.sync_compile)
util_queue_fence_wait(&shader->ready);
sel->last_variant = shader;
}
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
assert(!shader->is_optimized);
si_build_shader_variant(shader, thread_index, false);
assert(thread_index < ARRAY_SIZE(sscreen->compiler));
compiler = &sscreen->compiler[thread_index];
+ if (!compiler->passes)
+ si_init_compiler(sscreen, compiler);
+
+ /* Serialize NIR to save memory. Monolithic shader variants
+ * have to deserialize NIR before compilation.
+ */
if (sel->nir) {
- /* TODO: GS always sets wave size = default. Legacy GS will have
- * incorrect subgroup_size and ballot_bit_size. */
- si_lower_nir(sel, si_get_wave_size(sscreen, sel->type, true, false));
+ struct blob blob;
+ size_t size;
+
+ blob_init(&blob);
+		/* true = remove optional debugging data to increase
+		 * the likelihood of getting more shader cache hits.
+		 * It also drops variable names, so we'll save more memory.
+ */
+ nir_serialize(&blob, sel->nir, true);
+ blob_finish_get_buffer(&blob, &sel->nir_binary, &size);
+ sel->nir_size = size;
}
/* Compile the main shader part for use with a prolog and/or epilog.
*/
if (!sscreen->use_monolithic_shaders) {
struct si_shader *shader = CALLOC_STRUCT(si_shader);
- void *ir_binary = NULL;
+ unsigned char ir_sha1_cache_key[20];
if (!shader) {
fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
if (sscreen->use_ngg &&
(!sel->so.num_outputs || sscreen->use_ngg_streamout) &&
- ((sel->type == PIPE_SHADER_VERTEX &&
- !shader->key.as_ls && !shader->key.as_es) ||
+ ((sel->type == PIPE_SHADER_VERTEX && !shader->key.as_ls) ||
sel->type == PIPE_SHADER_TESS_EVAL ||
sel->type == PIPE_SHADER_GEOMETRY))
shader->key.as_ngg = 1;
- if (sel->tokens || sel->nir)
- ir_binary = si_get_ir_binary(sel);
+ if (sel->nir) {
+ si_get_ir_cache_key(sel, shader->key.as_ngg,
+ shader->key.as_es, ir_sha1_cache_key);
+ }
/* Try to load the shader from the shader cache. */
- mtx_lock(&sscreen->shader_cache_mutex);
+ simple_mtx_lock(&sscreen->shader_cache_mutex);
- if (ir_binary &&
- si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
- mtx_unlock(&sscreen->shader_cache_mutex);
+ if (si_shader_cache_load_shader(sscreen, ir_sha1_cache_key, shader)) {
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
} else {
- mtx_unlock(&sscreen->shader_cache_mutex);
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
/* Compile the shader if it hasn't been loaded from the cache. */
- if (si_compile_tgsi_shader(sscreen, compiler, shader,
+ if (si_compile_shader(sscreen, compiler, shader,
debug) != 0) {
FREE(shader);
- FREE(ir_binary);
fprintf(stderr, "radeonsi: can't compile a main shader part\n");
return;
}
- if (ir_binary) {
- mtx_lock(&sscreen->shader_cache_mutex);
- if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true))
- FREE(ir_binary);
- mtx_unlock(&sscreen->shader_cache_mutex);
- }
+ simple_mtx_lock(&sscreen->shader_cache_mutex);
+ si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key,
+ shader, true);
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
}
*si_get_main_shader_part(sel, &shader->key) = shader;
/* The GS copy shader is always pre-compiled. */
if (sel->type == PIPE_SHADER_GEOMETRY &&
- (!sscreen->use_ngg || sel->tess_turns_off_ngg)) {
+ (!sscreen->use_ngg ||
+ !sscreen->use_ngg_streamout || /* also for PRIMITIVES_GENERATED */
+ sel->tess_turns_off_ngg)) {
sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, compiler, sel, debug);
if (!sel->gs_copy_shader) {
fprintf(stderr, "radeonsi: can't create GS copy shader\n");
si_shader_vs(sscreen, sel->gs_copy_shader, sel);
}
+
+ /* Free NIR. We only keep serialized NIR after this point. */
+ if (sel->nir) {
+ ralloc_free(sel->nir);
+ sel->nir = NULL;
+ }
}
void si_schedule_initial_compile(struct si_context *sctx, unsigned processor,
}
util_queue_add_job(&sctx->screen->shader_compiler_queue, job,
- ready_fence, execute, NULL);
+ ready_fence, execute, NULL, 0);
if (debug) {
util_queue_fence_wait(ready_fence);
uint32_t *const_and_shader_buffers,
uint64_t *samplers_and_images)
{
- unsigned start, num_shaderbufs, num_constbufs, num_images, num_samplers;
+ unsigned start, num_shaderbufs, num_constbufs, num_images, num_msaa_images, num_samplers;
num_shaderbufs = util_last_bit(info->shader_buffers_declared);
num_constbufs = util_last_bit(info->const_buffers_declared);
/* two 8-byte images share one 16-byte slot */
num_images = align(util_last_bit(info->images_declared), 2);
+ num_msaa_images = align(util_last_bit(info->msaa_images_declared), 2);
num_samplers = util_last_bit(info->samplers_declared);
/* The layout is: sb[last] ... sb[0], cb[0] ... cb[last] */
*const_and_shader_buffers =
u_bit_consecutive(start, num_shaderbufs + num_constbufs);
- /* The layout is: image[last] ... image[0], sampler[0] ... sampler[last] */
+ /* The layout is:
+ * - fmask[last] ... fmask[0] go to [15-last .. 15]
+ * - image[last] ... image[0] go to [31-last .. 31]
+ * - sampler[0] ... sampler[last] go to [32 .. 32+last*2]
+ *
+ * FMASKs for images are placed separately, because MSAA images are rare,
+ * and so we can benefit from a better cache hit rate if we keep image
+ * descriptors together.
+ */
+ if (num_msaa_images)
+ num_images = SI_NUM_IMAGES + num_msaa_images; /* add FMASK descriptors */
+
start = si_get_image_slot(num_images - 1) / 2;
*samplers_and_images =
u_bit_consecutive64(start, num_images / 2 + num_samplers);
sel->so = state->stream_output;
- if (state->type == PIPE_SHADER_IR_TGSI &&
- !sscreen->options.enable_nir) {
- sel->tokens = tgsi_dup_tokens(state->tokens);
- if (!sel->tokens) {
- FREE(sel);
- return NULL;
- }
-
- tgsi_scan_shader(state->tokens, &sel->info);
- tgsi_scan_tess_ctrl(state->tokens, &sel->info, &sel->tcs_info);
-
- /* Fixup for TGSI: Set which opcode uses which (i,j) pair. */
- if (sel->info.uses_persp_opcode_interp_centroid)
- sel->info.uses_persp_centroid = true;
-
- if (sel->info.uses_linear_opcode_interp_centroid)
- sel->info.uses_linear_centroid = true;
-
- if (sel->info.uses_persp_opcode_interp_offset ||
- sel->info.uses_persp_opcode_interp_sample)
- sel->info.uses_persp_center = true;
-
- if (sel->info.uses_linear_opcode_interp_offset ||
- sel->info.uses_linear_opcode_interp_sample)
- sel->info.uses_linear_center = true;
+ if (state->type == PIPE_SHADER_IR_TGSI) {
+ sel->nir = tgsi_to_nir(state->tokens, ctx->screen);
} else {
- if (state->type == PIPE_SHADER_IR_TGSI) {
- sel->nir = tgsi_to_nir(state->tokens, ctx->screen);
- } else {
- assert(state->type == PIPE_SHADER_IR_NIR);
- sel->nir = state->ir.nir;
- }
-
- si_nir_lower_ps_inputs(sel->nir);
- si_nir_opts(sel->nir);
- si_nir_scan_shader(sel->nir, &sel->info);
- si_nir_scan_tess_ctrl(sel->nir, &sel->tcs_info);
+ assert(state->type == PIPE_SHADER_IR_NIR);
+ sel->nir = state->ir.nir;
}
+ si_nir_scan_shader(sel->nir, &sel->info);
+ si_nir_scan_tess_ctrl(sel->nir, &sel->tcs_info);
+ si_nir_adjust_driver_locations(sel->nir);
+
sel->type = sel->info.processor;
p_atomic_inc(&sscreen->num_shaders_created);
si_get_active_slot_masks(&sel->info,
(sel->so.output[i].stream * 4);
}
+ sel->num_vs_inputs = sel->type == PIPE_SHADER_VERTEX &&
+ !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] ?
+ sel->info.num_inputs : 0;
+
/* The prolog is a no-op if there are no inputs. */
sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX &&
sel->info.num_inputs &&
!sel->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] &&
!sel->so.num_outputs;
- if (sel->type == PIPE_SHADER_VERTEX &&
- sel->info.writes_edgeflag) {
- if (sscreen->info.chip_class >= GFX10)
- sel->ngg_writes_edgeflag = true;
- else
- sel->pos_writes_edgeflag = true;
- }
-
switch (sel->type) {
case PIPE_SHADER_GEOMETRY:
sel->gs_output_prim =
/* EN_MAX_VERT_OUT_PER_GS_INSTANCE does not work with tesselation. */
sel->tess_turns_off_ngg =
- (sscreen->info.family == CHIP_NAVI10 ||
- sscreen->info.family == CHIP_NAVI12 ||
- sscreen->info.family == CHIP_NAVI14) &&
+ sscreen->info.chip_class == GFX10 &&
sel->gs_num_invocations * sel->gs_max_out_vertices > 256;
break;
}
/* PA_CL_VS_OUT_CNTL */
- bool misc_vec_ena =
- sel->info.writes_psize || sel->pos_writes_edgeflag ||
- sel->info.writes_layer || sel->info.writes_viewport_index;
- sel->pa_cl_vs_out_cntl =
- S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) |
- S_02881C_USE_VTX_EDGE_FLAG(sel->pos_writes_edgeflag) |
- S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) |
- S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) |
- S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
- S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena);
+ if (sctx->chip_class <= GFX9)
+ sel->pa_cl_vs_out_cntl = si_get_vs_out_cntl(sel, false);
+
sel->clipdist_mask = sel->info.writes_clipvertex ?
SIX_BITS : sel->info.clipdist_writemask;
sel->culldist_mask = sel->info.culldist_writemask <<
if (sel->info.properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE])
sel->db_shader_control |= S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(1);
- (void) mtx_init(&sel->mutex, mtx_plain);
+ (void) simple_mtx_init(&sel->mutex, mtx_plain);
si_schedule_initial_compile(sctx, sel->info.processor, &sel->ready,
&sel->compiler_ctx_state, sel,
sctx->vs_shader.current = sel ? sel->first_variant : NULL;
sctx->num_vs_blit_sgprs = sel ? sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] : 0;
+ if (si_update_ngg(sctx))
+ si_shader_change_notify(sctx);
+
si_update_common_shader_state(sctx);
si_update_vs_viewport_state(sctx);
si_set_active_descriptors_for_shader(sctx, sel);
sctx->ps_shader.cso->info.uses_primid);
}
-static bool si_update_ngg(struct si_context *sctx)
+bool si_update_ngg(struct si_context *sctx)
{
- if (!sctx->screen->use_ngg)
+ if (!sctx->screen->use_ngg) {
+ assert(!sctx->ngg);
return false;
+ }
bool new_ngg = true;
if (sctx->gs_shader.cso && sctx->tes_shader.cso &&
- sctx->gs_shader.cso->tess_turns_off_ngg)
+ sctx->gs_shader.cso->tess_turns_off_ngg) {
new_ngg = false;
+ } else if (!sctx->screen->use_ngg_streamout) {
+ struct si_shader_selector *last = si_get_vs(sctx)->cso;
+
+ if ((last && last->so.num_outputs) ||
+ sctx->streamout.prims_gen_query_enabled)
+ new_ngg = false;
+ }
if (new_ngg != sctx->ngg) {
/* Transitioning from NGG to legacy GS requires VGT_FLUSH on Navi10-14.
sctx->flags |= SI_CONTEXT_VGT_FLUSH;
sctx->ngg = new_ngg;
- sctx->last_rast_prim = -1; /* reset this so that it gets updated */
+ sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */
return true;
}
return false;
sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL;
si_update_common_shader_state(sctx);
- sctx->last_rast_prim = -1; /* reset this so that it gets updated */
+ sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */
ngg_changed = si_update_ngg(sctx);
if (ngg_changed || enable_changed)
si_update_tess_uses_prim_id(sctx);
si_update_common_shader_state(sctx);
- sctx->last_rast_prim = -1; /* reset this so that it gets updated */
+ sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */
- if (enable_changed) {
- si_update_ngg(sctx);
+ bool ngg_changed = si_update_ngg(sctx);
+ if (ngg_changed || enable_changed)
si_shader_change_notify(sctx);
+ if (enable_changed)
sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
- }
si_update_vs_viewport_state(sctx);
si_set_active_descriptors_for_shader(sctx, sel);
si_update_streamout_state(sctx);
si_delete_shader(sctx, sel->gs_copy_shader);
util_queue_fence_destroy(&sel->ready);
- mtx_destroy(&sel->mutex);
- free(sel->tokens);
+ simple_mtx_destroy(&sel->mutex);
ralloc_free(sel->nir);
+ free(sel->nir_binary);
free(sel);
}
pipe_aligned_buffer_create(sctx->b.screen,
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
- esgs_ring_size, alignment);
+ esgs_ring_size,
+ sctx->screen->info.pte_fragment_size);
if (!sctx->esgs_ring)
return false;
}
pipe_aligned_buffer_create(sctx->b.screen,
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
- gsvs_ring_size, alignment);
+ gsvs_ring_size,
+ sctx->screen->info.pte_fragment_size);
if (!sctx->gsvs_ring)
return false;
}
static void si_shader_lock(struct si_shader *shader)
{
- mtx_lock(&shader->selector->mutex);
+ simple_mtx_lock(&shader->selector->mutex);
if (shader->previous_stage_sel) {
assert(shader->previous_stage_sel != shader->selector);
- mtx_lock(&shader->previous_stage_sel->mutex);
+ simple_mtx_lock(&shader->previous_stage_sel->mutex);
}
}
static void si_shader_unlock(struct si_shader *shader)
{
if (shader->previous_stage_sel)
- mtx_unlock(&shader->previous_stage_sel->mutex);
- mtx_unlock(&shader->selector->mutex);
+ simple_mtx_unlock(&shader->previous_stage_sel->mutex);
+ simple_mtx_unlock(&shader->selector->mutex);
}
/**
return 1;
}
-static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
-{
- return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0;
-}
-
static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
{
return shader ? shader->config.scratch_bytes_per_wave : 0;
sctx->fixed_func_tcs_shader.current;
}
-static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
-{
- unsigned bytes = 0;
-
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
-
- if (sctx->tes_shader.cso) {
- struct si_shader *tcs = si_get_tcs_current(sctx);
-
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(tcs));
- }
- return bytes;
-}
-
static bool si_update_scratch_relocs(struct si_context *sctx)
{
struct si_shader *tcs = si_get_tcs_current(sctx);
static bool si_update_spi_tmpring_size(struct si_context *sctx)
{
- unsigned current_scratch_buffer_size =
- si_get_current_scratch_buffer_size(sctx);
- unsigned scratch_bytes_per_wave =
- si_get_max_scratch_bytes_per_wave(sctx);
- unsigned scratch_needed_size = scratch_bytes_per_wave *
- sctx->scratch_waves;
+ /* SPI_TMPRING_SIZE.WAVESIZE must be constant for each scratch buffer.
+ * There are 2 cases to handle:
+ *
+ * - If the current needed size is less than the maximum seen size,
+ * use the maximum seen size, so that WAVESIZE remains the same.
+ *
+ * - If the current needed size is greater than the maximum seen size,
+ * the scratch buffer is reallocated, so we can increase WAVESIZE.
+ *
+ * Shaders that set SCRATCH_EN=0 don't allocate scratch space.
+ * Otherwise, the number of waves that can use scratch is
+ * SPI_TMPRING_SIZE.WAVES.
+ */
+ unsigned bytes = 0;
+
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
+
+ if (sctx->tes_shader.cso) {
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(si_get_tcs_current(sctx)));
+ }
+
+ sctx->max_seen_scratch_bytes_per_wave =
+ MAX2(sctx->max_seen_scratch_bytes_per_wave, bytes);
+
+ unsigned scratch_needed_size =
+ sctx->max_seen_scratch_bytes_per_wave * sctx->scratch_waves;
unsigned spi_tmpring_size;
if (scratch_needed_size > 0) {
- if (scratch_needed_size > current_scratch_buffer_size) {
+ if (!sctx->scratch_buffer ||
+ scratch_needed_size > sctx->scratch_buffer->b.b.width0) {
/* Create a bigger scratch buffer */
si_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer =
si_aligned_buffer_create(&sctx->screen->b,
- SI_RESOURCE_FLAG_UNMAPPABLE,
- PIPE_USAGE_DEFAULT,
- scratch_needed_size, 256);
+ SI_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ scratch_needed_size,
+ sctx->screen->info.pte_fragment_size);
if (!sctx->scratch_buffer)
return false;
"scratch size should already be aligned correctly.");
spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
- S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
+ S_0286E8_WAVESIZE(sctx->max_seen_scratch_bytes_per_wave >> 10);
if (spi_tmpring_size != sctx->spi_tmpring_size) {
sctx->spi_tmpring_size = spi_tmpring_size;
si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
static void si_init_tess_factor_ring(struct si_context *sctx)
{
assert(!sctx->tess_rings);
+ assert(((sctx->screen->tess_factor_ring_size / 4) & C_030938_SIZE) == 0);
/* The address must be aligned to 2^19, because the shader only
* receives the high 13 bits.
}
if (key.u.ngg) {
- stages |= S_028B54_PRIMGEN_EN(1);
- if (key.u.streamout)
- stages |= S_028B54_NGG_WAVE_ID_EN(1);
+ stages |= S_028B54_PRIMGEN_EN(1) |
+ S_028B54_NGG_WAVE_ID_EN(key.u.streamout) |
+ S_028B54_PRIMGEN_PASSTHRU_EN(key.u.ngg_passthrough);
} else if (key.u.gs)
stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
old_ps ? old_ps->key.part.ps.epilog.spi_shader_col_format : 0;
int r;
+ if (!sctx->compiler.passes)
+ si_init_compiler(sctx->screen, &sctx->compiler);
+
compiler_state.compiler = &sctx->compiler;
compiler_state.debug = sctx->debug;
compiler_state.is_debug_context = sctx->is_debug;
}
}
+ /* This must be done after the shader variant is selected. */
+ if (sctx->ngg)
+ key.u.ngg_passthrough = gfx10_is_ngg_passthrough(si_get_vs(sctx)->current);
+
si_update_vgt_shader_config(sctx, key);
if (old_clip_disable != si_get_vs_state(sctx)->key.opt.clip_disable)
si_mark_atom_dirty(sctx, &sctx->atoms.s.spi_map);
}
- if (sctx->screen->rbplus_allowed &&
+ if (sctx->screen->info.rbplus_allowed &&
si_pm4_state_changed(sctx, ps) &&
(!old_ps ||
old_spi_shader_col_format !=