#include "si_pipe.h"
#include "sid.h"
+#include "gfx9d.h"
#include "radeon/r600_cs.h"
#include "tgsi/tgsi_parse.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
+#include "util/disk_cache.h"
+#include "util/mesa-sha1.h"
+#include "ac_exp_param.h"
+
/* SHADER_CACHE */
/* There is always a size of data followed by the data itself. */
unsigned relocs_size = shader->binary.reloc_count *
sizeof(shader->binary.relocs[0]);
- unsigned disasm_size = strlen(shader->binary.disasm_string) + 1;
+ unsigned disasm_size = shader->binary.disasm_string ?
+ strlen(shader->binary.disasm_string) + 1 : 0;
unsigned llvm_ir_size = shader->binary.llvm_ir_string ?
strlen(shader->binary.llvm_ir_string) + 1 : 0;
unsigned size =
static bool si_shader_cache_insert_shader(struct si_screen *sscreen,
void *tgsi_binary,
- struct si_shader *shader)
+ struct si_shader *shader,
+ bool insert_into_disk_cache)
{
void *hw_binary;
struct hash_entry *entry;
+ uint8_t key[CACHE_KEY_SIZE];
entry = _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
if (entry)
return false;
}
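+	/* Also store the binary in the on-disk cache. The cache key is
+	 * derived from the TGSI binary, and the first dword of hw_binary
+	 * holds its total size, so it doubles as the blob size.
+	 */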
+ if (sscreen->b.disk_shader_cache && insert_into_disk_cache) {
+ disk_cache_compute_key(sscreen->b.disk_shader_cache, tgsi_binary,
+ *((uint32_t *)tgsi_binary), key);
+ disk_cache_put(sscreen->b.disk_shader_cache, key, hw_binary,
+ *((uint32_t *) hw_binary));
+ }
+
return true;
}
{
struct hash_entry *entry =
_mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
- if (!entry)
- return false;
+ if (!entry) {
+ if (sscreen->b.disk_shader_cache) {
+ unsigned char sha1[CACHE_KEY_SIZE];
+ size_t tg_size = *((uint32_t *) tgsi_binary);
+
+ disk_cache_compute_key(sscreen->b.disk_shader_cache,
+ tgsi_binary, tg_size, sha1);
+
+ size_t binary_size;
+ uint8_t *buffer =
+ disk_cache_get(sscreen->b.disk_shader_cache,
+ sha1, &binary_size);
+ if (!buffer)
+ return false;
- if (!si_load_shader_binary(shader, entry->data))
- return false;
+ if (binary_size < sizeof(uint32_t) ||
+ *((uint32_t*)buffer) != binary_size) {
+				/* Something has gone wrong; discard the
+				 * item from the cache and recompile from
+				 * source.
+				 */
+ assert(!"Invalid radeonsi shader disk cache "
+ "item!");
+
+ disk_cache_remove(sscreen->b.disk_shader_cache,
+ sha1);
+ free(buffer);
+ return false;
+ }
+
+ if (!si_load_shader_binary(shader, buffer)) {
+ free(buffer);
+ return false;
+ }
+ free(buffer);
+
+ if (!si_shader_cache_insert_shader(sscreen, tgsi_binary,
+ shader, false))
+ FREE(tgsi_binary);
+ } else {
+ return false;
+ }
+ } else {
+ if (si_load_shader_binary(shader, entry->data))
+ FREE(tgsi_binary);
+ else
+ return false;
+ }
p_atomic_inc(&sscreen->b.num_shader_cache_hits);
return true;
}
bool si_init_shader_cache(struct si_screen *sscreen)
{
- pipe_mutex_init(sscreen->shader_cache_mutex);
+ (void) mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
sscreen->shader_cache =
_mesa_hash_table_create(NULL,
si_shader_cache_key_hash,
si_shader_cache_key_equals);
+
return sscreen->shader_cache != NULL;
}
if (sscreen->shader_cache)
_mesa_hash_table_destroy(sscreen->shader_cache,
si_destroy_shader_cache_entry);
- pipe_mutex_destroy(sscreen->shader_cache_mutex);
+ mtx_destroy(&sscreen->shader_cache_mutex);
}
/* SHADER STATES */
static void si_set_tesseval_regs(struct si_screen *sscreen,
- struct si_shader *shader,
+ struct si_shader_selector *tes,
struct si_pm4_state *pm4)
{
- struct tgsi_shader_info *info = &shader->selector->info;
+ struct tgsi_shader_info *info = &tes->info;
unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
S_028B6C_DISTRIBUTION_MODE(distribution_mode));
}
+/* Polaris needs different VTX_REUSE_DEPTH settings depending on
+ * whether the "fractional odd" tessellation spacing is used.
+ *
+ * Possible VGT configurations and which state should set the register:
+ *
+ * Reg set in | VGT shader configuration | Value
+ * ------------------------------------------------------
+ * VS as VS | VS | 30
+ * VS as ES | ES -> GS -> VS | 30
+ * TES as VS | LS -> HS -> VS | 14 or 30
+ * TES as ES | LS -> HS -> ES -> GS -> VS | 14 or 30
+ *
+ * If "shader" is NULL, it's assumed it's not LS or GS copy shader.
+ */
+static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen,
+ struct si_shader_selector *sel,
+ struct si_shader *shader,
+ struct si_pm4_state *pm4)
+{
+ unsigned type = sel->type;
+
+ if (sscreen->b.family < CHIP_POLARIS10)
+ return;
+
+ /* VS as VS, or VS as ES: */
+ if ((type == PIPE_SHADER_VERTEX &&
+ (!shader ||
+ (!shader->key.as_ls && !shader->is_gs_copy_shader))) ||
+ /* TES as VS, or TES as ES: */
+ type == PIPE_SHADER_TESS_EVAL) {
+ unsigned vtx_reuse_depth = 30;
+
+ if (type == PIPE_SHADER_TESS_EVAL &&
+ sel->info.properties[TGSI_PROPERTY_TES_SPACING] ==
+ PIPE_TESS_SPACING_FRACTIONAL_ODD)
+ vtx_reuse_depth = 14;
+
+ si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ vtx_reuse_depth);
+ }
+}
+
static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader)
{
if (shader->pm4)
return shader->pm4;
}
-static void si_shader_ls(struct si_shader *shader)
+static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
{
struct si_pm4_state *pm4;
unsigned vgpr_comp_cnt;
uint64_t va;
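+	/* On GFX9, LS is merged into HS, so this state is only used on VI
+	 * and older.
+	 */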
+ assert(sscreen->b.chip_class <= VI);
+
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
return;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
/* We need at least 2 components for LS.
- * VGPR0-3: (VertexID, RelAutoindex, ???, InstanceID). */
- vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : 1;
+ * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
+	 * StepRate0 is set to 1, so that VGPR3 doesn't have to be loaded.
+ */
+ vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;
si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);
S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
S_00B528_DX10_CLAMP(1) |
S_00B528_FLOAT_MODE(shader->config.float_mode);
- shader->config.rsrc2 = S_00B52C_USER_SGPR(SI_LS_NUM_USER_SGPR) |
+ shader->config.rsrc2 = S_00B52C_USER_SGPR(SI_VS_NUM_USER_SGPR) |
S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
}
-static void si_shader_hs(struct si_shader *shader)
+static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
{
struct si_pm4_state *pm4;
uint64_t va;
+ unsigned ls_vgpr_comp_cnt = 0;
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
- si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
- si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
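+	/* On GFX9, LS and HS run as a single merged shader, which is bound
+	 * at the LS address registers.
+	 */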
+ if (sscreen->b.chip_class >= GFX9) {
+ si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
+ si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, va >> 40);
+
+ /* We need at least 2 components for LS.
+ * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
+		 * StepRate0 is set to 1, so that VGPR3 doesn't have to be loaded.
+ */
+ ls_vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;
+
+ if (shader->config.scratch_bytes_per_wave) {
+ fprintf(stderr, "HS: scratch buffer unsupported");
+ abort();
+ }
+
+ shader->config.rsrc2 =
+ S_00B42C_USER_SGPR(GFX9_TCS_NUM_USER_SGPR) |
+ S_00B42C_USER_SGPR_MSB(GFX9_TCS_NUM_USER_SGPR >> 5) |
+ S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
+ } else {
+ si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
+ si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
+
+ shader->config.rsrc2 =
+ S_00B42C_USER_SGPR(GFX6_TCS_NUM_USER_SGPR) |
+ S_00B42C_OC_LDS_EN(1) |
+ S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
+ }
+
si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) |
S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) |
S_00B428_DX10_CLAMP(1) |
- S_00B428_FLOAT_MODE(shader->config.float_mode));
- si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
- S_00B42C_USER_SGPR(SI_TCS_NUM_USER_SGPR) |
- S_00B42C_OC_LDS_EN(1) |
- S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+ S_00B428_FLOAT_MODE(shader->config.float_mode) |
+ S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt));
+
+ if (sscreen->b.chip_class <= VI) {
+ si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
+ shader->config.rsrc2);
+ }
}
static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
uint64_t va;
unsigned oc_lds_en;
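+	/* On GFX9, ES is merged into GS, so this state is only used on VI
+	 * and older.
+	 */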
+ assert(sscreen->b.chip_class <= VI);
+
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
return;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
if (shader->selector->type == PIPE_SHADER_VERTEX) {
- vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : 0;
- num_user_sgprs = SI_ES_NUM_USER_SGPR;
+ /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
+ vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
+ num_user_sgprs = SI_VS_NUM_USER_SGPR;
} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
- vgpr_comp_cnt = 3; /* all components are needed for TES */
+ vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
num_user_sgprs = SI_TES_NUM_USER_SGPR;
} else
unreachable("invalid shader selector type");
S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
- si_set_tesseval_regs(sscreen, shader, pm4);
+ si_set_tesseval_regs(sscreen, shader->selector, pm4);
+
+ polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
/**
*/
static uint32_t si_vgt_gs_mode(struct si_shader_selector *sel)
{
+ enum chip_class chip_class = sel->screen->b.chip_class;
unsigned gs_max_vert_out = sel->gs_max_out_vertices;
unsigned cut_mode;
return S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
S_028A40_CUT_MODE(cut_mode)|
- S_028A40_ES_WRITE_OPTIMIZE(1) |
- S_028A40_GS_WRITE_OPTIMIZE(1);
+ S_028A40_ES_WRITE_OPTIMIZE(chip_class <= VI) |
+ S_028A40_GS_WRITE_OPTIMIZE(1) |
+ S_028A40_ONCHIP(chip_class >= GFX9 ? 1 : 0);
+}
+
+struct gfx9_gs_info {
+ unsigned es_verts_per_subgroup;
+ unsigned gs_prims_per_subgroup;
+ unsigned gs_inst_prims_in_subgroup;
+ unsigned max_prims_per_subgroup;
+ unsigned lds_size;
+};
+
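+/* Compute the subgroup partitioning for the GFX9 merged ES-GS stage:
+ * how many ES vertices and GS primitives one subgroup processes, sized
+ * so that the ESGS data for one subgroup fits in LDS.
+ */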
+static void gfx9_get_gs_info(struct si_shader_selector *es,
+ struct si_shader_selector *gs,
+ struct gfx9_gs_info *out)
+{
+ unsigned gs_num_invocations = MAX2(gs->gs_num_invocations, 1);
+ unsigned input_prim = gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
+ bool uses_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
+ input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
+
+ /* All these are in dwords: */
+ /* We can't allow using the whole LDS, because GS waves compete with
+ * other shader stages for LDS space. */
+ const unsigned max_lds_size = 8 * 1024;
+ const unsigned esgs_itemsize = es->esgs_itemsize / 4;
+ unsigned esgs_lds_size;
+
+ /* All these are per subgroup: */
+ const unsigned max_out_prims = 32 * 1024;
+ const unsigned max_es_verts = 255;
+ const unsigned ideal_gs_prims = 64;
+ unsigned max_gs_prims, gs_prims;
+ unsigned min_es_verts, es_verts, worst_case_es_verts;
+
+ assert(gs_num_invocations <= 32); /* GL maximum */
+
+ if (uses_adjacency || gs_num_invocations > 1)
+ max_gs_prims = 127 / gs_num_invocations;
+ else
+ max_gs_prims = 255;
+
+ /* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
+ * Make sure we don't go over the maximum value.
+ */
+ max_gs_prims = MIN2(max_gs_prims,
+ max_out_prims /
+ (gs->gs_max_out_vertices * gs_num_invocations));
+ assert(max_gs_prims > 0);
+
+ /* If the primitive has adjacency, halve the number of vertices
+ * that will be reused in multiple primitives.
+ */
+ min_es_verts = gs->gs_input_verts_per_prim / (uses_adjacency ? 2 : 1);
+
+ gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
+ worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);
+
+ /* Compute ESGS LDS size based on the worst case number of ES vertices
+ * needed to create the target number of GS prims per subgroup.
+ */
+ esgs_lds_size = esgs_itemsize * worst_case_es_verts;
+
+	/* If the total LDS usage is too big, scale down the number of GS
+	 * prims per subgroup so that the ESGS data fits in LDS.
+	 */
+ if (esgs_lds_size > max_lds_size) {
+ /* Our target GS Prims Per Subgroup was too large. Calculate
+ * the maximum number of GS Prims Per Subgroup that will fit
+ * into LDS, capped by the maximum that the hardware can support.
+ */
+ gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
+ max_gs_prims);
+ assert(gs_prims > 0);
+ worst_case_es_verts = MIN2(min_es_verts * gs_prims,
+ max_es_verts);
+
+ esgs_lds_size = esgs_itemsize * worst_case_es_verts;
+ assert(esgs_lds_size <= max_lds_size);
+ }
+
+ /* Now calculate remaining ESGS information. */
+ if (esgs_lds_size)
+ es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
+ else
+ es_verts = max_es_verts;
+
+ /* Vertices for adjacency primitives are not always reused, so restore
+ * it for ES_VERTS_PER_SUBGRP.
+ */
+ min_es_verts = gs->gs_input_verts_per_prim;
+
+	/* For normal primitives, the VGT only checks if they are past the ES
+	 * verts per subgroup after allocating a full GS primitive, and if they
+	 * are, it kicks off a new subgroup. But if those additional ES verts
+	 * are unique (i.e. not reused), we need to make sure there is enough
+	 * LDS space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
+	 */
+ es_verts -= min_es_verts - 1;
+
+ out->es_verts_per_subgroup = es_verts;
+ out->gs_prims_per_subgroup = gs_prims;
+ out->gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
+ out->max_prims_per_subgroup = out->gs_inst_prims_in_subgroup *
+ gs->gs_max_out_vertices;
+ out->lds_size = align(esgs_lds_size, 128) / 128;
+
+ assert(out->max_prims_per_subgroup <= max_out_prims);
}
-static void si_shader_gs(struct si_shader *shader)
+static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
{
struct si_shader_selector *sel = shader->selector;
const ubyte *num_components = sel->info.num_stream_output_components;
if (!pm4)
return;
- si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(shader->selector));
-
offset = num_components[0] * sel->gs_max_out_vertices;
si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, offset);
if (max_stream >= 1)
/* The GSVS_RING_ITEMSIZE register takes 15 bits */
assert(offset < (1 << 15));
- si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, shader->selector->gs_max_out_vertices);
+ si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, sel->gs_max_out_vertices);
si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, num_components[0]);
si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? num_components[1] : 0);
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
- si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
- si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);
- si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
- S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
- S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
- S_00B228_DX10_CLAMP(1) |
- S_00B228_FLOAT_MODE(shader->config.float_mode));
- si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
- S_00B22C_USER_SGPR(SI_GS_NUM_USER_SGPR) |
- S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+ if (sscreen->b.chip_class >= GFX9) {
+ unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
+ unsigned es_type = shader->key.part.gs.es->type;
+ unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
+ struct gfx9_gs_info gs_info;
+
+ if (es_type == PIPE_SHADER_VERTEX)
+ /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
+ es_vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
+ else if (es_type == PIPE_SHADER_TESS_EVAL)
+ es_vgpr_comp_cnt = shader->key.part.gs.es->info.uses_primid ? 3 : 2;
+ else
+ unreachable("invalid shader selector type");
+
+ /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
+ * VGPR[0:4] are always loaded.
+ */
+ if (sel->info.uses_invocationid)
+ gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
+ else if (sel->info.uses_primid)
+ gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
+ else if (input_prim >= PIPE_PRIM_TRIANGLES)
+ gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
+ else
+ gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
+
+ gfx9_get_gs_info(shader->key.part.gs.es, sel, &gs_info);
+
+ si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
+ si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, va >> 40);
+
+ si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
+ S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
+ S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
+ S_00B228_DX10_CLAMP(1) |
+ S_00B228_FLOAT_MODE(shader->config.float_mode) |
+ S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
+ si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
+ S_00B22C_USER_SGPR(GFX9_GS_NUM_USER_SGPR) |
+ S_00B22C_USER_SGPR_MSB(GFX9_GS_NUM_USER_SGPR >> 5) |
+ S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
+ S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
+ S_00B22C_LDS_SIZE(gs_info.lds_size) |
+ S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+
+ si_pm4_set_reg(pm4, R_028A44_VGT_GS_ONCHIP_CNTL,
+ S_028A44_ES_VERTS_PER_SUBGRP(gs_info.es_verts_per_subgroup) |
+ S_028A44_GS_PRIMS_PER_SUBGRP(gs_info.gs_prims_per_subgroup) |
+ S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_info.gs_inst_prims_in_subgroup));
+ si_pm4_set_reg(pm4, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
+ S_028A94_MAX_PRIMS_PER_SUBGROUP(gs_info.max_prims_per_subgroup));
+ si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
+ shader->key.part.gs.es->esgs_itemsize / 4);
+
+ if (es_type == PIPE_SHADER_TESS_EVAL)
+ si_set_tesseval_regs(sscreen, shader->key.part.gs.es, pm4);
+
+ polaris_set_vgt_vertex_reuse(sscreen, shader->key.part.gs.es,
+ NULL, pm4);
+
+ if (shader->config.scratch_bytes_per_wave) {
+ fprintf(stderr, "GS: scratch buffer unsupported");
+ abort();
+ }
+ } else {
+ si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
+ si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);
+
+ si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
+ S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
+ S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
+ S_00B228_DX10_CLAMP(1) |
+ S_00B228_FLOAT_MODE(shader->config.float_mode));
+ si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
+ S_00B22C_USER_SGPR(GFX6_GS_NUM_USER_SGPR) |
+ S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+ }
}
/**
unsigned oc_lds_en;
unsigned window_space =
shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
- bool enable_prim_id = si_vs_exports_prim_id(shader);
+ bool enable_prim_id = shader->key.mono.vs_export_prim_id;
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
* not sent again.
*/
if (!gs) {
- si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
- S_028A40_MODE(enable_prim_id ? V_028A40_GS_SCENARIO_A : 0));
+ unsigned mode = 0;
+
+ /* PrimID needs GS scenario A.
+ * GFX9 also needs it when ViewportIndex is enabled.
+ */
+ if (enable_prim_id ||
+ (sscreen->b.chip_class >= GFX9 &&
+ shader->selector->info.writes_viewport_index))
+ mode = V_028A40_GS_SCENARIO_A;
+
+ si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, S_028A40_MODE(mode));
si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
} else {
si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(gs));
vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
- vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : (enable_prim_id ? 2 : 0);
+ /* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
+		 * If PrimID is disabled, InstanceID / StepRate1 is loaded instead.
+		 * StepRate0 is set to 1, so that VGPR3 doesn't have to be loaded.
+ */
+ vgpr_comp_cnt = enable_prim_id ? 2 : (shader->info.uses_instanceid ? 1 : 0);
num_user_sgprs = SI_VS_NUM_USER_SGPR;
} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
- vgpr_comp_cnt = 3; /* all components are needed for TES */
+ vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
num_user_sgprs = SI_TES_NUM_USER_SGPR;
} else
unreachable("invalid shader selector type");
S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
- si_set_tesseval_regs(sscreen, shader, pm4);
+ si_set_tesseval_regs(sscreen, shader->selector, pm4);
+
+ polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
static unsigned si_get_ps_num_interp(struct si_shader *ps)
switch (shader->selector->type) {
case PIPE_SHADER_VERTEX:
if (shader->key.as_ls)
- si_shader_ls(shader);
+ si_shader_ls(sscreen, shader);
else if (shader->key.as_es)
si_shader_es(sscreen, shader);
else
si_shader_vs(sscreen, shader, NULL);
break;
case PIPE_SHADER_TESS_CTRL:
- si_shader_hs(shader);
+ si_shader_hs(sscreen, shader);
break;
case PIPE_SHADER_TESS_EVAL:
if (shader->key.as_es)
si_shader_vs(sscreen, shader, NULL);
break;
case PIPE_SHADER_GEOMETRY:
- si_shader_gs(shader);
+ si_shader_gs(sscreen, shader);
break;
case PIPE_SHADER_FRAGMENT:
si_shader_ps(shader);
return PIPE_FUNC_ALWAYS;
}
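+/* Gather the parts of the shader key that depend on the vertex element
+ * state: instance divisors for the VS prolog and format fixups
+ * (fix_fetch) for the monolithic key.
+ */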
+static void si_shader_selector_key_vs(struct si_context *sctx,
+ struct si_shader_selector *vs,
+ struct si_shader_key *key,
+ struct si_vs_prolog_bits *prolog_key)
+{
+ if (!sctx->vertex_elements)
+ return;
+
+ unsigned count = MIN2(vs->info.num_inputs,
+ sctx->vertex_elements->count);
+ for (unsigned i = 0; i < count; ++i) {
+ prolog_key->instance_divisors[i] =
+ sctx->vertex_elements->elements[i].instance_divisor;
+ }
+
+ memcpy(key->mono.vs_fix_fetch, sctx->vertex_elements->fix_fetch, count);
+}
+
static void si_shader_selector_key_hw_vs(struct si_context *sctx,
struct si_shader_selector *vs,
struct si_shader_key *key)
struct si_shader_key *key)
{
struct si_context *sctx = (struct si_context *)ctx;
- unsigned i;
memset(key, 0, sizeof(*key));
switch (sel->type) {
case PIPE_SHADER_VERTEX:
- if (sctx->vertex_elements) {
- unsigned count = MIN2(sel->info.num_inputs,
- sctx->vertex_elements->count);
- for (i = 0; i < count; ++i)
- key->part.vs.prolog.instance_divisors[i] =
- sctx->vertex_elements->elements[i].instance_divisor;
-
- key->mono.vs.fix_fetch =
- sctx->vertex_elements->fix_fetch &
- u_bit_consecutive64(0, 4 * count);
- }
+ si_shader_selector_key_vs(sctx, sel, key, &key->part.vs.prolog);
+
if (sctx->tes_shader.cso)
key->as_ls = 1;
else if (sctx->gs_shader.cso)
si_shader_selector_key_hw_vs(sctx, sel, key);
if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
- key->part.vs.epilog.export_prim_id = 1;
+ key->mono.vs_export_prim_id = 1;
}
break;
case PIPE_SHADER_TESS_CTRL:
+ if (sctx->b.chip_class >= GFX9) {
+ si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
+ key, &key->part.tcs.ls_prolog);
+ key->part.tcs.ls = sctx->vs_shader.cso;
+ }
+
key->part.tcs.epilog.prim_mode =
sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
+ key->part.tcs.epilog.tes_reads_tess_factors =
+ sctx->tes_shader.cso->info.reads_tess_factors;
if (sel == sctx->fixed_func_tcs_shader.cso)
- key->mono.tcs.inputs_to_copy = sctx->vs_shader.cso->outputs_written;
+ key->mono.ff_tcs_inputs_to_copy = sctx->vs_shader.cso->outputs_written;
break;
case PIPE_SHADER_TESS_EVAL:
if (sctx->gs_shader.cso)
si_shader_selector_key_hw_vs(sctx, sel, key);
if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
- key->part.tes.epilog.export_prim_id = 1;
+ key->mono.vs_export_prim_id = 1;
}
break;
case PIPE_SHADER_GEOMETRY:
+ if (sctx->b.chip_class >= GFX9) {
+ if (sctx->tes_shader.cso) {
+ key->part.gs.es = sctx->tes_shader.cso;
+ } else {
+ si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
+ key, &key->part.gs.vs_prolog);
+ key->part.gs.es = sctx->vs_shader.cso;
+ }
+
+ /* Merged ES-GS can have unbalanced wave usage.
+ *
+ * ES threads are per-vertex, while GS threads are
+ * per-primitive. So without any amplification, there
+ * are fewer GS threads than ES threads, which can result
+ * in empty (no-op) GS waves. With too much amplification,
+ * there are more GS threads than ES threads, which
+ * can result in empty (no-op) ES waves.
+ *
+ * Non-monolithic shaders are implemented by setting EXEC
+ * at the beginning of shader parts, and don't jump to
+ * the end if EXEC is 0.
+ *
+ * Monolithic shaders use conditional blocks, so they can
+ * jump and skip empty waves of ES or GS. So set this to
+ * always use optimized variants, which are monolithic.
+ */
+ key->opt.prefer_mono = 1;
+ }
key->part.gs.prolog.tri_strip_adj_fix = sctx->gs_tri_strip_adj_fix;
break;
case PIPE_SHADER_FRAGMENT: {
* to the range supported by the type if a channel has less
* than 16 bits and the export format is 16_ABGR.
*/
- if (sctx->b.chip_class <= CIK && sctx->b.family != CHIP_HAWAII)
+ if (sctx->b.chip_class <= CIK && sctx->b.family != CHIP_HAWAII) {
key->part.ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
+ key->part.ps.epilog.color_is_int10 = sctx->framebuffer.color_is_int10;
+ }
/* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
if (!key->part.ps.epilog.last_cbuf) {
key->part.ps.epilog.spi_shader_col_format &= sel->colors_written_4bit;
key->part.ps.epilog.color_is_int8 &= sel->info.colors_written;
+ key->part.ps.epilog.color_is_int10 &= sel->info.colors_written;
}
if (rs) {
si_shader_init_pm4_state(sscreen, shader);
}
+static const struct si_shader_key zeroed;
+
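+/* Compile the main shader part on demand if the variant for the given
+ * key (VS, LS, or ES) doesn't exist yet.
+ */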
+static bool si_check_missing_main_part(struct si_screen *sscreen,
+ struct si_shader_selector *sel,
+ struct si_compiler_ctx_state *compiler_state,
+ struct si_shader_key *key)
+{
+ struct si_shader **mainp = si_get_main_shader_part(sel, key);
+
+ if (!*mainp) {
+ struct si_shader *main_part = CALLOC_STRUCT(si_shader);
+
+ if (!main_part)
+ return false;
+
+ main_part->selector = sel;
+ main_part->key.as_es = key->as_es;
+ main_part->key.as_ls = key->as_ls;
+
+ if (si_compile_tgsi_shader(sscreen, compiler_state->tm,
+ main_part, false,
+ &compiler_state->debug) != 0) {
+ FREE(main_part);
+ return false;
+ }
+ *mainp = main_part;
+ }
+ return true;
+}
+
+static void si_destroy_shader_selector(struct si_context *sctx,
+ struct si_shader_selector *sel);
+
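+/* Set *dst to src while releasing the old selector, destroying it when
+ * its last reference goes away.
+ */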
+static void si_shader_selector_reference(struct si_context *sctx,
+ struct si_shader_selector **dst,
+ struct si_shader_selector *src)
+{
+ if (pipe_reference(&(*dst)->reference, &src->reference))
+ si_destroy_shader_selector(sctx, *dst);
+
+ *dst = src;
+}
+
/* Select the hw shader variant depending on the current state. */
static int si_shader_select_with_key(struct si_screen *sscreen,
struct si_shader_ctx_state *state,
struct si_shader_key *key,
int thread_index)
{
- static const struct si_shader_key zeroed;
struct si_shader_selector *sel = state->cso;
+ struct si_shader_selector *previous_stage_sel = NULL;
struct si_shader *current = state->current;
struct si_shader *iter, *shader = NULL;
memcmp(¤t->key, key, sizeof(*key)) == 0 &&
(!current->is_optimized ||
util_queue_fence_is_signalled(¤t->optimized_ready))))
- return 0;
+ return current->compilation_failed ? -1 : 0;
/* This must be done before the mutex is locked, because async GS
* compilation calls this function too, and therefore must enter
* in a compiler thread.
*/
if (thread_index < 0)
- util_queue_job_wait(&sel->ready);
+ util_queue_fence_wait(&sel->ready);
- pipe_mutex_lock(sel->mutex);
+ mtx_lock(&sel->mutex);
/* Find the shader variant. */
for (iter = sel->first_variant; iter; iter = iter->next_variant) {
if (iter->is_optimized &&
!util_queue_fence_is_signalled(&iter->optimized_ready)) {
memset(&key->opt, 0, sizeof(key->opt));
- pipe_mutex_unlock(sel->mutex);
+ mtx_unlock(&sel->mutex);
goto again;
}
if (iter->compilation_failed) {
- pipe_mutex_unlock(sel->mutex);
+ mtx_unlock(&sel->mutex);
return -1; /* skip the draw call */
}
state->current = iter;
- pipe_mutex_unlock(sel->mutex);
+ mtx_unlock(&sel->mutex);
return 0;
}
}
/* Build a new shader. */
shader = CALLOC_STRUCT(si_shader);
if (!shader) {
- pipe_mutex_unlock(sel->mutex);
+ mtx_unlock(&sel->mutex);
return -ENOMEM;
}
shader->selector = sel;
shader->key = *key;
shader->compiler_ctx_state = *compiler_state;
+ /* If this is a merged shader, get the first shader's selector. */
+ if (sscreen->b.chip_class >= GFX9) {
+ if (sel->type == PIPE_SHADER_TESS_CTRL)
+ previous_stage_sel = key->part.tcs.ls;
+ else if (sel->type == PIPE_SHADER_GEOMETRY)
+ previous_stage_sel = key->part.gs.es;
+ }
+
+ /* Compile the main shader part if it doesn't exist. This can happen
+ * if the initial guess was wrong. */
+ bool is_pure_monolithic =
+ sscreen->use_monolithic_shaders ||
+ memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;
+
+ if (!is_pure_monolithic) {
+ bool ok;
+
+ /* Make sure the main shader part is present. This is needed
+ * for shaders that can be compiled as VS, LS, or ES, and only
+ * one of them is compiled at creation.
+ *
+ * For merged shaders, check that the starting shader's main
+ * part is present.
+ */
+ if (previous_stage_sel) {
+ struct si_shader_key shader1_key = zeroed;
+
+ if (sel->type == PIPE_SHADER_TESS_CTRL)
+ shader1_key.as_ls = 1;
+ else if (sel->type == PIPE_SHADER_GEOMETRY)
+ shader1_key.as_es = 1;
+ else
+ assert(0);
+
+ ok = si_check_missing_main_part(sscreen,
+ previous_stage_sel,
+ compiler_state, &shader1_key);
+ } else {
+ ok = si_check_missing_main_part(sscreen, sel,
+ compiler_state, key);
+ }
+ if (!ok) {
+ FREE(shader);
+ mtx_unlock(&sel->mutex);
+ return -ENOMEM; /* skip the draw call */
+ }
+ }
+
+ /* Keep the reference to the 1st shader of merged shaders, so that
+ * Gallium can't destroy it before we destroy the 2nd shader.
+ *
+ * Set sctx = NULL, because it's unused if we're not releasing
+ * the shader, and we don't have any sctx here.
+ */
+ si_shader_selector_reference(NULL, &shader->previous_stage_sel,
+ previous_stage_sel);
+
/* Monolithic-only shaders don't make a distinction between optimized
* and unoptimized. */
shader->is_monolithic =
- !sel->main_shader_part ||
- sel->main_shader_part->key.as_ls != key->as_ls ||
- sel->main_shader_part->key.as_es != key->as_es ||
- memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0 ||
- memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;
+ is_pure_monolithic ||
+ memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
shader->is_optimized =
- !sscreen->use_monolithic_shaders &&
+ !is_pure_monolithic &&
memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
if (shader->is_optimized)
util_queue_fence_init(&shader->optimized_ready);
/* If it's an optimized shader, compile it asynchronously. */
if (shader->is_optimized &&
+ !is_pure_monolithic &&
thread_index < 0) {
/* Compile it asynchronously. */
util_queue_add_job(&sscreen->shader_compiler_queue,
/* Use the default (unoptimized) shader for now. */
memset(&key->opt, 0, sizeof(key->opt));
- pipe_mutex_unlock(sel->mutex);
+ mtx_unlock(&sel->mutex);
goto again;
}
if (!shader->compilation_failed)
state->current = shader;
- pipe_mutex_unlock(sel->mutex);
+ mtx_unlock(&sel->mutex);
return shader->compilation_failed ? -1 : 0;
}
tgsi_binary = si_get_tgsi_binary(sel);
/* Try to load the shader from the shader cache. */
- pipe_mutex_lock(sscreen->shader_cache_mutex);
+ mtx_lock(&sscreen->shader_cache_mutex);
if (tgsi_binary &&
si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
- FREE(tgsi_binary);
- pipe_mutex_unlock(sscreen->shader_cache_mutex);
+ mtx_unlock(&sscreen->shader_cache_mutex);
} else {
- pipe_mutex_unlock(sscreen->shader_cache_mutex);
+ mtx_unlock(&sscreen->shader_cache_mutex);
/* Compile the shader if it hasn't been loaded from the cache. */
if (si_compile_tgsi_shader(sscreen, tm, shader, false,
}
if (tgsi_binary) {
- pipe_mutex_lock(sscreen->shader_cache_mutex);
- if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader))
+ mtx_lock(&sscreen->shader_cache_mutex);
+ if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader, true))
FREE(tgsi_binary);
- pipe_mutex_unlock(sscreen->shader_cache_mutex);
+ mtx_unlock(&sscreen->shader_cache_mutex);
}
}
- sel->main_shader_part = shader;
+ *si_get_main_shader_part(sel, &shader->key) = shader;
/* Unset "outputs_written" flags for outputs converted to
* DEFAULT_VAL, so that later inter-shader optimizations don't
for (i = 0; i < sel->info.num_outputs; i++) {
unsigned offset = shader->info.vs_output_param_offset[i];
- if (offset <= EXP_PARAM_OFFSET_31)
+ if (offset <= AC_EXP_PARAM_OFFSET_31)
continue;
unsigned name = sel->info.output_semantic_name[i];
if (!sel)
return NULL;
+ pipe_reference_init(&sel->reference, 1);
sel->screen = sscreen;
sel->compiler_ctx_state.tm = sctx->tm;
sel->compiler_ctx_state.debug = sctx->b.debug;
sel->type = sel->info.processor;
p_atomic_inc(&sscreen->b.num_shaders_created);
+ /* The prolog is a no-op if there are no inputs. */
+ sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX &&
+ sel->info.num_inputs;
+
/* Set which opcode uses which (i,j) pair. */
if (sel->info.uses_persp_opcode_interp_centroid)
sel->info.uses_persp_centroid = true;
}
}
sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;
+
+ /* For the ESGS ring in LDS, add 1 dword to reduce LDS bank
+ * conflicts, i.e. each vertex will start at a different bank.
+ */
+ if (sctx->b.chip_class >= GFX9)
+ sel->esgs_itemsize += 4;
break;
case PIPE_SHADER_FRAGMENT:
sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
}
- pipe_mutex_init(sel->mutex);
+ (void) mtx_init(&sel->mutex, mtx_plain);
util_queue_fence_init(&sel->ready);
if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
sctx->is_debug ||
- r600_can_dump_shader(&sscreen->b, sel->info.processor) ||
- !util_queue_is_initialized(&sscreen->shader_compiler_queue))
+ r600_can_dump_shader(&sscreen->b, sel->info.processor))
si_init_shader_selector_async(sel, -1);
else
util_queue_add_job(&sscreen->shader_compiler_queue, sel,
sctx->gs_shader.cso = sel;
sctx->gs_shader.current = sel ? sel->first_variant : NULL;
+ sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL;
sctx->do_update_shaders = true;
si_mark_atom_dirty(sctx, &sctx->clip_regs);
sctx->last_rast_prim = -1; /* reset this so that it gets updated */
r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}
+static void si_update_tcs_tes_uses_prim_id(struct si_context *sctx)
+{
+ sctx->ia_multi_vgt_param_key.u.tcs_tes_uses_prim_id =
+ (sctx->tes_shader.cso &&
+ sctx->tes_shader.cso->info.uses_primid) ||
+ (sctx->tcs_shader.cso &&
+ sctx->tcs_shader.cso->info.uses_primid);
+}
+
static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
sctx->tcs_shader.cso = sel;
sctx->tcs_shader.current = sel ? sel->first_variant : NULL;
+ si_update_tcs_tes_uses_prim_id(sctx);
sctx->do_update_shaders = true;
if (enable_changed)
sctx->tes_shader.cso = sel;
sctx->tes_shader.current = sel ? sel->first_variant : NULL;
+ sctx->ia_multi_vgt_param_key.u.uses_tess = sel != NULL;
+ si_update_tcs_tes_uses_prim_id(sctx);
sctx->do_update_shaders = true;
si_mark_atom_dirty(sctx, &sctx->clip_regs);
sctx->last_rast_prim = -1; /* reset this so that it gets updated */
static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
{
if (shader->is_optimized) {
- util_queue_job_wait(&shader->optimized_ready);
+ util_queue_fence_wait(&shader->optimized_ready);
util_queue_fence_destroy(&shader->optimized_ready);
}
if (shader->pm4) {
switch (shader->selector->type) {
case PIPE_SHADER_VERTEX:
- if (shader->key.as_ls)
+ if (shader->key.as_ls) {
+ assert(sctx->b.chip_class <= VI);
si_pm4_delete_state(sctx, ls, shader->pm4);
- else if (shader->key.as_es)
+ } else if (shader->key.as_es) {
+ assert(sctx->b.chip_class <= VI);
si_pm4_delete_state(sctx, es, shader->pm4);
- else
+ } else {
si_pm4_delete_state(sctx, vs, shader->pm4);
+ }
break;
case PIPE_SHADER_TESS_CTRL:
si_pm4_delete_state(sctx, hs, shader->pm4);
break;
case PIPE_SHADER_TESS_EVAL:
- if (shader->key.as_es)
+ if (shader->key.as_es) {
+ assert(sctx->b.chip_class <= VI);
si_pm4_delete_state(sctx, es, shader->pm4);
- else
+ } else {
si_pm4_delete_state(sctx, vs, shader->pm4);
+ }
break;
case PIPE_SHADER_GEOMETRY:
if (shader->is_gs_copy_shader)
}
}
+ si_shader_selector_reference(sctx, &shader->previous_stage_sel, NULL);
si_shader_destroy(shader);
free(shader);
}
-static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
+static void si_destroy_shader_selector(struct si_context *sctx,
+ struct si_shader_selector *sel)
{
- struct si_context *sctx = (struct si_context *)ctx;
- struct si_shader_selector *sel = (struct si_shader_selector *)state;
struct si_shader *p = sel->first_variant, *c;
struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
[PIPE_SHADER_VERTEX] = &sctx->vs_shader,
[PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
};
- util_queue_job_wait(&sel->ready);
+ util_queue_fence_wait(&sel->ready);
if (current_shader[sel->type]->cso == sel) {
current_shader[sel->type]->cso = NULL;
if (sel->main_shader_part)
si_delete_shader(sctx, sel->main_shader_part);
+ if (sel->main_shader_part_ls)
+ si_delete_shader(sctx, sel->main_shader_part_ls);
+ if (sel->main_shader_part_es)
+ si_delete_shader(sctx, sel->main_shader_part_es);
if (sel->gs_copy_shader)
si_delete_shader(sctx, sel->gs_copy_shader);
util_queue_fence_destroy(&sel->ready);
- pipe_mutex_destroy(sel->mutex);
+ mtx_destroy(&sel->mutex);
free(sel->tokens);
free(sel);
}
+static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_shader_selector *sel = (struct si_shader_selector *)state;
+
+ si_shader_selector_reference(sctx, &sel, NULL);
+}
+
static unsigned si_get_ps_input_cntl(struct si_context *sctx,
struct si_shader *vs, unsigned name,
unsigned index, unsigned interpolate)
index == vsinfo->output_semantic_index[j]) {
offset = vs->info.vs_output_param_offset[j];
- if (offset <= EXP_PARAM_OFFSET_31) {
+ if (offset <= AC_EXP_PARAM_OFFSET_31) {
/* The input is loaded from parameter memory. */
ps_input_cntl |= S_028644_OFFSET(offset);
} else if (!G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
- if (offset == EXP_PARAM_UNDEFINED) {
+ if (offset == AC_EXP_PARAM_UNDEFINED) {
/* This can happen with depth-only rendering. */
offset = 0;
} else {
/* The input is a DEFAULT_VAL constant. */
- assert(offset >= EXP_PARAM_DEFAULT_VAL_0000 &&
- offset <= EXP_PARAM_DEFAULT_VAL_1111);
- offset -= EXP_PARAM_DEFAULT_VAL_0000;
+ assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
+ offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
+ offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
}
ps_input_cntl = S_028644_OFFSET(0x20) |
unsigned num_se = sctx->screen->b.info.max_se;
unsigned wave_size = 64;
unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
- unsigned gs_vertex_reuse = 16 * num_se; /* GS_VERTEX_REUSE register (per SE) */
+ /* On SI-CI, the value comes from VGT_GS_VERTEX_REUSE = 16.
+ * On VI+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
+ */
+ unsigned gs_vertex_reuse = (sctx->b.chip_class >= VI ? 32 : 16) * num_se;
unsigned alignment = 256 * num_se;
/* The maximum size is 63.999 MB per SE. */
unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
/* Some rings don't have to be allocated if shaders don't use them.
* (e.g. no varyings between ES and GS or GS and VS)
+ *
+ * GFX9 doesn't have the ESGS ring.
*/
- bool update_esgs = esgs_ring_size &&
+ bool update_esgs = sctx->b.chip_class <= VI &&
+ esgs_ring_size &&
(!sctx->esgs_ring ||
sctx->esgs_ring->width0 < esgs_ring_size);
bool update_gsvs = gsvs_ring_size &&
if (update_esgs) {
pipe_resource_reference(&sctx->esgs_ring, NULL);
- sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, 0,
- PIPE_USAGE_DEFAULT,
- esgs_ring_size);
+ sctx->esgs_ring =
+ r600_aligned_buffer_create(sctx->b.b.screen,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ esgs_ring_size, alignment);
if (!sctx->esgs_ring)
return false;
}
if (update_gsvs) {
pipe_resource_reference(&sctx->gsvs_ring, NULL);
- sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, 0,
- PIPE_USAGE_DEFAULT,
- gsvs_ring_size);
+ sctx->gsvs_ring =
+ r600_aligned_buffer_create(sctx->b.b.screen,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ gsvs_ring_size, alignment);
if (!sctx->gsvs_ring)
return false;
}
return false;
if (sctx->b.chip_class >= CIK) {
- if (sctx->esgs_ring)
+ if (sctx->esgs_ring) {
+ assert(sctx->b.chip_class <= VI);
si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
sctx->esgs_ring->width0 / 256);
+ }
if (sctx->gsvs_ring)
si_pm4_set_reg(pm4, R_030904_VGT_GSVS_RING_SIZE,
sctx->gsvs_ring->width0 / 256);
/* Set ring bindings. */
if (sctx->esgs_ring) {
+ assert(sctx->b.chip_class <= VI);
si_set_ring_buffer(&sctx->b.b, SI_ES_RING_ESGS,
sctx->esgs_ring, 0, sctx->esgs_ring->width0,
true, true, 4, 64, 0);
assert(sctx->scratch_buffer);
- si_shader_apply_scratch_relocs(sctx, shader, &shader->config, scratch_va);
+ si_shader_apply_scratch_relocs(shader, scratch_va);
/* Replace the shader bo with a new bo that has the relocs applied. */
r = si_shader_binary_upload(sctx->screen, shader);
r600_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer = (struct r600_resource*)
- pipe_buffer_create(&sctx->screen->b.b, 0,
- PIPE_USAGE_DEFAULT, scratch_needed_size);
+ r600_aligned_buffer_create(&sctx->screen->b.b,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ scratch_needed_size, 256);
if (!sctx->scratch_buffer)
return false;
- sctx->emit_scratch_reloc = true;
+
+ si_mark_atom_dirty(sctx, &sctx->scratch_state);
+ r600_context_add_resource_size(&sctx->b.b,
+ &sctx->scratch_buffer->b.b);
}
/* Update the shaders, so they are using the latest scratch. The
S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
if (spi_tmpring_size != sctx->spi_tmpring_size) {
sctx->spi_tmpring_size = spi_tmpring_size;
- sctx->emit_scratch_reloc = true;
+ si_mark_atom_dirty(sctx, &sctx->scratch_state);
}
return true;
}
static void si_init_tess_factor_ring(struct si_context *sctx)
{
- bool double_offchip_buffers = sctx->b.chip_class >= CIK;
+ bool double_offchip_buffers = sctx->b.chip_class >= CIK &&
+ sctx->b.family != CHIP_CARRIZO &&
+ sctx->b.family != CHIP_STONEY;
unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
unsigned max_offchip_buffers = max_offchip_buffers_per_se *
sctx->screen->b.info.max_se;
max_offchip_buffers = MIN2(max_offchip_buffers, 126);
break;
case CIK:
+ case VI:
+ case GFX9:
max_offchip_buffers = MIN2(max_offchip_buffers, 508);
break;
- case VI:
default:
- max_offchip_buffers = MIN2(max_offchip_buffers, 512);
- break;
+ assert(0);
+ return;
}
assert(!sctx->tf_ring);
- sctx->tf_ring = pipe_buffer_create(sctx->b.b.screen, 0,
- PIPE_USAGE_DEFAULT,
- 32768 * sctx->screen->b.info.max_se);
+ /* Use 64K alignment for both rings, so that we can pass the address
+ * to shaders as one SGPR containing bits [16:47].
+ */
+ sctx->tf_ring = r600_aligned_buffer_create(sctx->b.b.screen,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ 32768 * sctx->screen->b.info.max_se,
+ 64 * 1024);
if (!sctx->tf_ring)
return;
assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);
- sctx->tess_offchip_ring = pipe_buffer_create(sctx->b.b.screen, 0,
- PIPE_USAGE_DEFAULT,
- max_offchip_buffers *
- sctx->screen->tess_offchip_block_dw_size * 4);
+ sctx->tess_offchip_ring =
+ r600_aligned_buffer_create(sctx->b.b.screen,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ max_offchip_buffers *
+ sctx->screen->tess_offchip_block_dw_size * 4,
+ 64 * 1024);
if (!sctx->tess_offchip_ring)
return;
si_init_config_add_vgt_flush(sctx);
+ uint64_t offchip_va = r600_resource(sctx->tess_offchip_ring)->gpu_address;
+ uint64_t factor_va = r600_resource(sctx->tf_ring)->gpu_address;
+ assert((offchip_va & 0xffff) == 0);
+ assert((factor_va & 0xffff) == 0);
+
+ si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tess_offchip_ring),
+ RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
+ si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tf_ring),
+ RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
+
/* Append these registers to the init config state. */
if (sctx->b.chip_class >= CIK) {
if (sctx->b.chip_class >= VI)
si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
S_030938_SIZE(sctx->tf_ring->width0 / 4));
si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
- r600_resource(sctx->tf_ring)->gpu_address >> 8);
+ factor_va >> 8);
+ if (sctx->b.chip_class >= GFX9)
+ si_pm4_set_reg(sctx->init_config, R_030944_VGT_TF_MEMORY_BASE_HI,
+ factor_va >> 40);
si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
S_03093C_OFFCHIP_GRANULARITY(offchip_granularity));
si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
S_008988_SIZE(sctx->tf_ring->width0 / 4));
si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
- r600_resource(sctx->tf_ring)->gpu_address >> 8);
+ factor_va >> 8);
si_pm4_set_reg(sctx->init_config, R_0089B0_VGT_HS_OFFCHIP_PARAM,
S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers));
}
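+	/* Pass the 64K-aligned ring addresses to the shaders as user SGPRs
+	 * holding address bits [16:47], hence the >> 16.
+	 */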
+ if (sctx->b.chip_class >= GFX9) {
+ si_pm4_set_reg(sctx->init_config,
+ R_00B430_SPI_SHADER_USER_DATA_LS_0 +
+ GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K * 4,
+ offchip_va >> 16);
+ si_pm4_set_reg(sctx->init_config,
+ R_00B430_SPI_SHADER_USER_DATA_LS_0 +
+ GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K * 4,
+ factor_va >> 16);
+ } else {
+ si_pm4_set_reg(sctx->init_config,
+ R_00B430_SPI_SHADER_USER_DATA_HS_0 +
+ GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K * 4,
+ offchip_va >> 16);
+ si_pm4_set_reg(sctx->init_config,
+ R_00B430_SPI_SHADER_USER_DATA_HS_0 +
+ GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K * 4,
+ factor_va >> 16);
+ }
+
/* Flush the context to re-emit the init_config state.
* This is done only once in a lifetime of a context.
*/
si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
sctx->b.initial_gfx_cs_size = 0; /* force flush */
si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
-
- si_set_ring_buffer(&sctx->b.b, SI_HS_RING_TESS_FACTOR, sctx->tf_ring,
- 0, sctx->tf_ring->width0, false, false, 0, 0, 0);
-
- si_set_ring_buffer(&sctx->b.b, SI_HS_RING_TESS_OFFCHIP,
- sctx->tess_offchip_ring, 0,
- sctx->tess_offchip_ring->width0, false, false, 0, 0, 0);
}
/**
S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
}
+ if (sctx->b.chip_class >= GFX9)
+ stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
+
si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
}
si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}
/* VS as LS */
- r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
- if (r)
- return false;
- si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
+ if (sctx->b.chip_class <= VI) {
+ r = si_shader_select(ctx, &sctx->vs_shader,
+ &compiler_state);
+ if (r)
+ return false;
+ si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
+ }
if (sctx->tcs_shader.cso) {
r = si_shader_select(ctx, &sctx->tcs_shader,
sctx->fixed_func_tcs_shader.current->pm4);
}
- r = si_shader_select(ctx, &sctx->tes_shader, &compiler_state);
- if (r)
- return false;
-
if (sctx->gs_shader.cso) {
/* TES as ES */
- si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
+ if (sctx->b.chip_class <= VI) {
+ r = si_shader_select(ctx, &sctx->tes_shader,
+ &compiler_state);
+ if (r)
+ return false;
+ si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
+ }
} else {
/* TES as VS */
+ r = si_shader_select(ctx, &sctx->tes_shader,
+ &compiler_state);
+ if (r)
+ return false;
si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
si_update_so(sctx, sctx->tes_shader.cso);
}
} else if (sctx->gs_shader.cso) {
- /* VS as ES */
- r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
- if (r)
- return false;
- si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
+ if (sctx->b.chip_class <= VI) {
+ /* VS as ES */
+ r = si_shader_select(ctx, &sctx->vs_shader,
+ &compiler_state);
+ if (r)
+ return false;
+ si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
- si_pm4_bind_state(sctx, ls, NULL);
- si_pm4_bind_state(sctx, hs, NULL);
+ si_pm4_bind_state(sctx, ls, NULL);
+ si_pm4_bind_state(sctx, hs, NULL);
+ }
} else {
/* VS as VS */
r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
return false;
} else {
si_pm4_bind_state(sctx, gs, NULL);
- si_pm4_bind_state(sctx, es, NULL);
+ if (sctx->b.chip_class <= VI)
+ si_pm4_bind_state(sctx, es, NULL);
}
si_update_vgt_shader_config(sctx);
si_mark_atom_dirty(sctx, &sctx->spi_map);
}
- if (sctx->b.family == CHIP_STONEY && si_pm4_state_changed(sctx, ps))
+ if (sctx->screen->b.rbplus_allowed && si_pm4_state_changed(sctx, ps))
si_mark_atom_dirty(sctx, &sctx->cb_render_state);
if (sctx->ps_db_shader_control != db_shader_control) {
return true;
}
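+/* Emit the scratch state: write SPI_TMPRING_SIZE and add the scratch
+ * buffer to the CS buffer list.
+ */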
+static void si_emit_scratch_state(struct si_context *sctx,
+ struct r600_atom *atom)
+{
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+
+ radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
+ sctx->spi_tmpring_size);
+
+ if (sctx->scratch_buffer) {
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ sctx->scratch_buffer, RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SCRATCH_BUFFER);
+ }
+}
+
void si_init_shader_functions(struct si_context *sctx)
{
si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);
+ si_init_atom(sctx, &sctx->scratch_state, &sctx->atoms.s.scratch_state,
+ si_emit_scratch_state);
sctx->b.b.create_vs_state = si_create_shader_selector;
sctx->b.b.create_tcs_state = si_create_shader_selector;