#include "sid.h"
#include "compiler/nir/nir_serialize.h"
+#include "nir/tgsi_to_nir.h"
#include "tgsi/tgsi_parse.h"
#include "util/hash_table.h"
#include "util/crc32.h"
* Return the IR binary in a buffer. For TGSI the first 4 bytes contain its
* size as integer.
*/
-void *si_get_ir_binary(struct si_shader_selector *sel)
+void *si_get_ir_binary(struct si_shader_selector *sel, bool ngg, bool es)
{
struct blob blob;
unsigned ir_size;
assert(sel->nir);
blob_init(&blob);
- nir_serialize(&blob, sel->nir);
+ nir_serialize(&blob, sel->nir, true);
ir_binary = blob.data;
ir_size = blob.size;
}
- unsigned size = 4 + ir_size + sizeof(sel->so);
+ /* These settings affect the compilation, but they are not derived
+ * from the input shader IR.
+ */
+ unsigned shader_variant_flags = 0;
+
+ if (ngg)
+ shader_variant_flags |= 1 << 0;
+ if (sel->nir)
+ shader_variant_flags |= 1 << 1;
+ if (si_get_wave_size(sel->screen, sel->type, ngg, es) == 32)
+ shader_variant_flags |= 1 << 2;
+ if (sel->force_correct_derivs_after_kill)
+ shader_variant_flags |= 1 << 3;
+
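+ /* The cache key buffer built below is laid out as:
+ * [0..3] total buffer size in bytes
+ * [4..7] shader_variant_flags
+ * [8..] serialized IR (TGSI tokens or NIR), followed by sel->so
+ */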
+ unsigned size = 4 + 4 + ir_size + sizeof(sel->so);
char *result = (char*)MALLOC(size);
if (!result)
return NULL;
- *((uint32_t*)result) = size;
- memcpy(result + 4, ir_binary, ir_size);
- memcpy(result + 4 + ir_size, &sel->so, sizeof(sel->so));
+ ((uint32_t*)result)[0] = size;
+ ((uint32_t*)result)[1] = shader_variant_flags;
+ memcpy(result + 8, ir_binary, ir_size);
+ memcpy(result + 8 + ir_size, &sel->so, sizeof(sel->so));
if (sel->nir)
blob_finish(&blob);
bool si_init_shader_cache(struct si_screen *sscreen)
{
- (void) mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
+ (void) simple_mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
sscreen->shader_cache =
_mesa_hash_table_create(NULL,
si_shader_cache_key_hash,
if (sscreen->shader_cache)
_mesa_hash_table_destroy(sscreen->shader_cache,
si_destroy_shader_cache_entry);
- mtx_destroy(&sscreen->shader_cache_mutex);
+ simple_mtx_destroy(&sscreen->shader_cache_mutex);
}
/* SHADER STATES */
else
topology = V_028B6C_OUTPUT_TRIANGLE_CW;
- if (sscreen->has_distributed_tess) {
+ if (sscreen->info.has_distributed_tess) {
if (sscreen->info.family == CHIP_FIJI ||
sscreen->info.family >= CHIP_POLARIS10)
distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
{
unsigned type = sel->type;
- if (sscreen->info.family < CHIP_POLARIS10)
+ if (sscreen->info.family < CHIP_POLARIS10 ||
+ sscreen->info.chip_class >= GFX10)
return;
/* VS as VS, or VS as ES: */
return num_always_on_user_sgprs + 1;
}
+/* Return VGPR_COMP_CNT for the API vertex shader. This can be hw LS, LSHS, ES, ESGS, VS. */
+static unsigned si_get_vs_vgpr_comp_cnt(struct si_screen *sscreen,
+ struct si_shader *shader, bool legacy_vs_prim_id)
+{
+ assert(shader->selector->type == PIPE_SHADER_VERTEX ||
+ (shader->previous_stage_sel &&
+ shader->previous_stage_sel->type == PIPE_SHADER_VERTEX));
+
+ /* GFX6-9 LS (VertexID, RelAutoindex, InstanceID / StepRate0(==1), ...).
+ * GFX6-9 ES,VS (VertexID, InstanceID / StepRate0(==1), VSPrimID, ...)
+ * GFX10 LS (VertexID, RelAutoindex, UserVGPR1, InstanceID).
+ * GFX10 ES,VS (VertexID, UserVGPR0, UserVGPR1 or VSPrimID, UserVGPR2 or InstanceID)
+ */
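+ /* For example, a GFX9 LS that uses InstanceID returns 2 (InstanceID sits
+ * in VGPR2 there), while on GFX10 any of these stages that uses InstanceID
+ * returns 3 (InstanceID is always in VGPR3).
+ */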
+ bool is_ls = shader->selector->type == PIPE_SHADER_TESS_CTRL || shader->key.as_ls;
+
+ if (sscreen->info.chip_class >= GFX10 && shader->info.uses_instanceid)
+ return 3;
+ else if ((is_ls && shader->info.uses_instanceid) || legacy_vs_prim_id)
+ return 2;
+ else if (is_ls || shader->info.uses_instanceid)
+ return 1;
+ else
+ return 0;
+}
+
static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
{
struct si_pm4_state *pm4;
- unsigned vgpr_comp_cnt;
uint64_t va;
assert(sscreen->info.chip_class <= GFX8);
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
- /* We need at least 2 components for LS.
- * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
- * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
- */
- vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;
-
si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40));
shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
- S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
+ S_00B528_VGPR_COMP_CNT(si_get_vs_vgpr_comp_cnt(sscreen, shader, false)) |
S_00B528_DX10_CLAMP(1) |
S_00B528_FLOAT_MODE(shader->config.float_mode);
shader->config.rsrc2 = S_00B52C_USER_SGPR(si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR)) |
{
struct si_pm4_state *pm4;
uint64_t va;
- unsigned ls_vgpr_comp_cnt = 0;
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
if (sscreen->info.chip_class >= GFX9) {
- si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
- si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, S_00B414_MEM_BASE(va >> 40));
-
- /* We need at least 2 components for LS.
- * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
- * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
- */
- ls_vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;
+ if (sscreen->info.chip_class >= GFX10) {
+ si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
+ si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40));
+ } else {
+ si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
+ si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, S_00B414_MEM_BASE(va >> 40));
+ }
unsigned num_user_sgprs =
si_get_num_vs_user_sgprs(GFX9_TCS_NUM_USER_SGPR);
shader->config.rsrc2 =
S_00B42C_USER_SGPR(num_user_sgprs) |
- S_00B42C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5) |
S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
+
+ if (sscreen->info.chip_class >= GFX10)
+ shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
+ else
+ shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);
} else {
si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, S_00B424_MEM_BASE(va >> 40));
}
si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
- S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) |
- S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) |
+ S_00B428_VGPRS((shader->config.num_vgprs - 1) /
+ (sscreen->ge_wave_size == 32 ? 8 : 4)) |
+ (sscreen->info.chip_class <= GFX9 ?
+ S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) : 0) |
S_00B428_DX10_CLAMP(1) |
+ S_00B428_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
+ S_00B428_WGP_MODE(sscreen->info.chip_class >= GFX10) |
S_00B428_FLOAT_MODE(shader->config.float_mode) |
- S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt));
+ S_00B428_LS_VGPR_COMP_CNT(sscreen->info.chip_class >= GFX9 ?
+ si_get_vs_vgpr_comp_cnt(sscreen, shader, false) : 0));
if (sscreen->info.chip_class <= GFX8) {
si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
if (shader->selector->type == PIPE_SHADER_VERTEX) {
- /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
- vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
+ vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
num_user_sgprs = si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR);
} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
-static unsigned si_conv_prim_to_gs_out(unsigned mode)
-{
- static const int prim_conv[] = {
- [PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
- [PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
- [PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
- [PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
- [PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
- [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
- [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
- };
- assert(mode < ARRAY_SIZE(prim_conv));
-
- return prim_conv[mode];
-}
-
void gfx9_get_gs_info(struct si_shader_selector *es,
struct si_shader_selector *gs,
struct gfx9_gs_info *out)
return;
/* R_028A60_VGT_GSVS_RING_OFFSET_1, R_028A64_VGT_GSVS_RING_OFFSET_2
- * R_028A68_VGT_GSVS_RING_OFFSET_3, R_028A6C_VGT_GS_OUT_PRIM_TYPE */
- radeon_opt_set_context_reg4(sctx, R_028A60_VGT_GSVS_RING_OFFSET_1,
+ * R_028A68_VGT_GSVS_RING_OFFSET_3 */
+ radeon_opt_set_context_reg3(sctx, R_028A60_VGT_GSVS_RING_OFFSET_1,
SI_TRACKED_VGT_GSVS_RING_OFFSET_1,
shader->ctx_reg.gs.vgt_gsvs_ring_offset_1,
shader->ctx_reg.gs.vgt_gsvs_ring_offset_2,
- shader->ctx_reg.gs.vgt_gsvs_ring_offset_3,
- shader->ctx_reg.gs.vgt_gs_out_prim_type);
-
+ shader->ctx_reg.gs.vgt_gsvs_ring_offset_3);
/* R_028AB0_VGT_GSVS_RING_ITEMSIZE */
radeon_opt_set_context_reg(sctx, R_028AB0_VGT_GSVS_RING_ITEMSIZE,
offset += num_components[2] * sel->gs_max_out_vertices;
shader->ctx_reg.gs.vgt_gsvs_ring_offset_3 = offset;
- shader->ctx_reg.gs.vgt_gs_out_prim_type =
- si_conv_prim_to_gs_out(sel->gs_output_prim);
-
if (max_stream >= 3)
offset += num_components[3] * sel->gs_max_out_vertices;
shader->ctx_reg.gs.vgt_gsvs_ring_itemsize = offset;
unsigned es_type = shader->key.part.gs.es->type;
unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
- if (es_type == PIPE_SHADER_VERTEX)
- /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
- es_vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
- else if (es_type == PIPE_SHADER_TESS_EVAL)
+ if (es_type == PIPE_SHADER_VERTEX) {
+ es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
+ } else if (es_type == PIPE_SHADER_TESS_EVAL)
es_vgpr_comp_cnt = shader->key.part.gs.es->info.uses_primid ? 3 : 2;
else
unreachable("invalid shader selector type");
else
num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
- si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
- si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, S_00B214_MEM_BASE(va >> 40));
+ if (sscreen->info.chip_class >= GFX10) {
+ si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
+ si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(va >> 40));
+ } else {
+ si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
+ si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, S_00B214_MEM_BASE(va >> 40));
+ }
- si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
- S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
- S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
- S_00B228_DX10_CLAMP(1) |
- S_00B228_FLOAT_MODE(shader->config.float_mode) |
- S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
- si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
- S_00B22C_USER_SGPR(num_user_sgprs) |
- S_00B22C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5) |
- S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
- S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
- S_00B22C_LDS_SIZE(shader->config.lds_size) |
- S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+ uint32_t rsrc1 =
+ S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
+ S_00B228_DX10_CLAMP(1) |
+ S_00B228_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
+ S_00B228_WGP_MODE(sscreen->info.chip_class >= GFX10) |
+ S_00B228_FLOAT_MODE(shader->config.float_mode) |
+ S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
+ uint32_t rsrc2 =
+ S_00B22C_USER_SGPR(num_user_sgprs) |
+ S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
+ S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
+ S_00B22C_LDS_SIZE(shader->config.lds_size) |
+ S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
+
+ if (sscreen->info.chip_class >= GFX10) {
+ rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
+ } else {
+ rsrc1 |= S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8);
+ rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);
+ }
+
+ si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS, rsrc1);
+ si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS, rsrc2);
shader->ctx_reg.gs.vgt_gs_onchip_cntl =
S_028A44_ES_VERTS_PER_SUBGRP(shader->gs_info.es_verts_per_subgroup) |
}
}
+/* Common tail code for NGG primitive shaders. */
+static void gfx10_emit_shader_ngg_tail(struct si_context *sctx,
+ struct si_shader *shader,
+ unsigned initial_cdw)
+{
+ radeon_opt_set_context_reg(sctx, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
+ SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP,
+ shader->ctx_reg.ngg.ge_max_output_per_subgroup);
+ radeon_opt_set_context_reg(sctx, R_028B4C_GE_NGG_SUBGRP_CNTL,
+ SI_TRACKED_GE_NGG_SUBGRP_CNTL,
+ shader->ctx_reg.ngg.ge_ngg_subgrp_cntl);
+ radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN,
+ SI_TRACKED_VGT_PRIMITIVEID_EN,
+ shader->ctx_reg.ngg.vgt_primitiveid_en);
+ radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
+ SI_TRACKED_VGT_GS_ONCHIP_CNTL,
+ shader->ctx_reg.ngg.vgt_gs_onchip_cntl);
+ radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT,
+ SI_TRACKED_VGT_GS_INSTANCE_CNT,
+ shader->ctx_reg.ngg.vgt_gs_instance_cnt);
+ radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
+ SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,
+ shader->ctx_reg.ngg.vgt_esgs_ring_itemsize);
+ radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG,
+ SI_TRACKED_SPI_VS_OUT_CONFIG,
+ shader->ctx_reg.ngg.spi_vs_out_config);
+ radeon_opt_set_context_reg2(sctx, R_028708_SPI_SHADER_IDX_FORMAT,
+ SI_TRACKED_SPI_SHADER_IDX_FORMAT,
+ shader->ctx_reg.ngg.spi_shader_idx_format,
+ shader->ctx_reg.ngg.spi_shader_pos_format);
+ radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL,
+ SI_TRACKED_PA_CL_VTE_CNTL,
+ shader->ctx_reg.ngg.pa_cl_vte_cntl);
+ radeon_opt_set_context_reg(sctx, R_028838_PA_CL_NGG_CNTL,
+ SI_TRACKED_PA_CL_NGG_CNTL,
+ shader->ctx_reg.ngg.pa_cl_ngg_cntl);
+
+ radeon_opt_set_context_reg_rmw(sctx, R_02881C_PA_CL_VS_OUT_CNTL,
+ SI_TRACKED_PA_CL_VS_OUT_CNTL__VS,
+ shader->pa_cl_vs_out_cntl,
+ SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK);
+
+ if (initial_cdw != sctx->gfx_cs->current.cdw)
+ sctx->context_roll = true;
+}
+
+static void gfx10_emit_shader_ngg_notess_nogs(struct si_context *sctx)
+{
+ struct si_shader *shader = sctx->queued.named.gs->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
+ if (!shader)
+ return;
+
+ gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
+}
+
+static void gfx10_emit_shader_ngg_tess_nogs(struct si_context *sctx)
+{
+ struct si_shader *shader = sctx->queued.named.gs->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
+ if (!shader)
+ return;
+
+ radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
+ SI_TRACKED_VGT_TF_PARAM,
+ shader->vgt_tf_param);
+
+ gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
+}
+
+static void gfx10_emit_shader_ngg_notess_gs(struct si_context *sctx)
+{
+ struct si_shader *shader = sctx->queued.named.gs->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
+ if (!shader)
+ return;
+
+ radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT,
+ SI_TRACKED_VGT_GS_MAX_VERT_OUT,
+ shader->ctx_reg.ngg.vgt_gs_max_vert_out);
+
+ gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
+}
+
+static void gfx10_emit_shader_ngg_tess_gs(struct si_context *sctx)
+{
+ struct si_shader *shader = sctx->queued.named.gs->shader;
+ unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
+ if (!shader)
+ return;
+
+ radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT,
+ SI_TRACKED_VGT_GS_MAX_VERT_OUT,
+ shader->ctx_reg.ngg.vgt_gs_max_vert_out);
+ radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM,
+ SI_TRACKED_VGT_TF_PARAM,
+ shader->vgt_tf_param);
+
+ gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw);
+}
+
+unsigned si_get_input_prim(const struct si_shader_selector *gs)
+{
+ if (gs->type == PIPE_SHADER_GEOMETRY)
+ return gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
+
+ if (gs->type == PIPE_SHADER_TESS_EVAL) {
+ if (gs->info.properties[TGSI_PROPERTY_TES_POINT_MODE])
+ return PIPE_PRIM_POINTS;
+ if (gs->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
+ return PIPE_PRIM_LINES;
+ return PIPE_PRIM_TRIANGLES;
+ }
+
+ /* TODO: Set this correctly if the primitive type is set in the shader key. */
+ return PIPE_PRIM_TRIANGLES; /* worst case for all callers */
+}
+
+static unsigned si_get_vs_out_cntl(const struct si_shader_selector *sel, bool ngg)
+{
+ bool misc_vec_ena =
+ sel->info.writes_psize || (sel->info.writes_edgeflag && !ngg) ||
+ sel->info.writes_layer || sel->info.writes_viewport_index;
+ return S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) |
+ S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag && !ngg) |
+ S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) |
+ S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) |
+ S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
+ S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena);
+}
+
+/**
+ * Prepare the PM4 image for \p shader, which will run as a merged ESGS shader
+ * in NGG mode.
+ */
+static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader)
+{
+ const struct si_shader_selector *gs_sel = shader->selector;
+ const struct tgsi_shader_info *gs_info = &gs_sel->info;
+ enum pipe_shader_type gs_type = shader->selector->type;
+ const struct si_shader_selector *es_sel =
+ shader->previous_stage_sel ? shader->previous_stage_sel : shader->selector;
+ const struct tgsi_shader_info *es_info = &es_sel->info;
+ enum pipe_shader_type es_type = es_sel->type;
+ unsigned num_user_sgprs;
+ unsigned nparams, es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
+ uint64_t va;
+ unsigned window_space =
+ gs_info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
+ bool es_enable_prim_id = shader->key.mono.u.vs_export_prim_id || es_info->uses_primid;
+ unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
+ unsigned input_prim = si_get_input_prim(gs_sel);
+ bool break_wave_at_eoi = false;
+ struct si_pm4_state *pm4 = si_get_shader_pm4_state(shader);
+ if (!pm4)
+ return;
+
+ if (es_type == PIPE_SHADER_TESS_EVAL) {
+ pm4->atom.emit = gs_type == PIPE_SHADER_GEOMETRY ? gfx10_emit_shader_ngg_tess_gs
+ : gfx10_emit_shader_ngg_tess_nogs;
+ } else {
+ pm4->atom.emit = gs_type == PIPE_SHADER_GEOMETRY ? gfx10_emit_shader_ngg_notess_gs
+ : gfx10_emit_shader_ngg_notess_nogs;
+ }
+
+ va = shader->bo->gpu_address;
+ si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
+
+ if (es_type == PIPE_SHADER_VERTEX) {
+ es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false);
+
+ if (es_info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
+ num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
+ es_info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
+ } else {
+ num_user_sgprs = si_get_num_vs_user_sgprs(GFX9_VSGS_NUM_USER_SGPR);
+ }
+ } else {
+ assert(es_type == PIPE_SHADER_TESS_EVAL);
+ es_vgpr_comp_cnt = es_enable_prim_id ? 3 : 2;
+ num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
+
+ if (es_enable_prim_id || gs_info->uses_primid)
+ break_wave_at_eoi = true;
+ }
+
+ /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
+ * VGPR[0:4] are always loaded.
+ *
+ * Vertex shaders always need to load VGPR3, because they need to
+ * pass edge flags for decomposed primitives (such as quads) to the PA
+ * for the GL_LINE polygon mode to skip rendering lines on inner edges.
+ */
+ if (gs_info->uses_invocationid || gs_type == PIPE_SHADER_VERTEX)
+ gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID, edge flags. */
+ else if (gs_info->uses_primid)
+ gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
+ else if (input_prim >= PIPE_PRIM_TRIANGLES)
+ gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
+ else
+ gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
+
+ si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
+ si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
+ si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
+ S_00B228_VGPRS((shader->config.num_vgprs - 1) /
+ (sscreen->ge_wave_size == 32 ? 8 : 4)) |
+ S_00B228_FLOAT_MODE(shader->config.float_mode) |
+ S_00B228_DX10_CLAMP(1) |
+ S_00B228_MEM_ORDERED(1) |
+ S_00B228_WGP_MODE(1) |
+ S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
+ si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
+ S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0) |
+ S_00B22C_USER_SGPR(num_user_sgprs) |
+ S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
+ S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5) |
+ S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
+ S_00B22C_LDS_SIZE(shader->config.lds_size));
+
+ nparams = MAX2(shader->info.nr_param_exports, 1);
+ shader->ctx_reg.ngg.spi_vs_out_config =
+ S_0286C4_VS_EXPORT_COUNT(nparams - 1) |
+ S_0286C4_NO_PC_EXPORT(shader->info.nr_param_exports == 0);
+
+ shader->ctx_reg.ngg.spi_shader_idx_format =
+ S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP);
+ shader->ctx_reg.ngg.spi_shader_pos_format =
+ S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
+ S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE) |
+ S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE) |
+ S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
+ V_02870C_SPI_SHADER_4COMP :
+ V_02870C_SPI_SHADER_NONE);
+
+ shader->ctx_reg.ngg.vgt_primitiveid_en =
+ S_028A84_PRIMITIVEID_EN(es_enable_prim_id) |
+ S_028A84_NGG_DISABLE_PROVOK_REUSE(es_enable_prim_id);
+
+ if (gs_type == PIPE_SHADER_GEOMETRY) {
+ shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = es_sel->esgs_itemsize / 4;
+ shader->ctx_reg.ngg.vgt_gs_max_vert_out = gs_sel->gs_max_out_vertices;
+ } else {
+ shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = 1;
+ }
+
+ if (es_type == PIPE_SHADER_TESS_EVAL)
+ si_set_tesseval_regs(sscreen, es_sel, pm4);
+
+ shader->ctx_reg.ngg.vgt_gs_onchip_cntl =
+ S_028A44_ES_VERTS_PER_SUBGRP(shader->ngg.hw_max_esverts) |
+ S_028A44_GS_PRIMS_PER_SUBGRP(shader->ngg.max_gsprims) |
+ S_028A44_GS_INST_PRIMS_IN_SUBGRP(shader->ngg.max_gsprims * gs_num_invocations);
+ shader->ctx_reg.ngg.ge_max_output_per_subgroup =
+ S_0287FC_MAX_VERTS_PER_SUBGROUP(shader->ngg.max_out_verts);
+ shader->ctx_reg.ngg.ge_ngg_subgrp_cntl =
+ S_028B4C_PRIM_AMP_FACTOR(shader->ngg.prim_amp_factor) |
+ S_028B4C_THDS_PER_SUBGRP(0); /* for fast launch */
+ shader->ctx_reg.ngg.vgt_gs_instance_cnt =
+ S_028B90_CNT(gs_num_invocations) |
+ S_028B90_ENABLE(gs_num_invocations > 1) |
+ S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(
+ shader->ngg.max_vert_out_per_gs_instance);
+
+ /* Always output hw-generated edge flags and pass them via the prim
+ * export to prevent drawing lines on internal edges of decomposed
+ * primitives (such as quads) with polygon mode = lines. Only VS needs
+ * this.
+ */
+ shader->ctx_reg.ngg.pa_cl_ngg_cntl =
+ S_028838_INDEX_BUF_EDGE_FLAG_ENA(gs_type == PIPE_SHADER_VERTEX);
+ shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(gs_sel, true);
+
+ shader->ge_cntl =
+ S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) |
+ S_03096C_VERT_GRP_SIZE(shader->ngg.hw_max_esverts) |
+ S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);
+
+ /* Bug workaround for a possible hang with non-tessellation cases.
+ * Tessellation always sets GE_CNTL.VERT_GRP_SIZE = 0
+ *
+ * Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5
+ */
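+ /* E.g. with hw_max_esverts = 128, VERT_GRP_SIZE becomes 123 below. */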
+ if ((sscreen->info.family == CHIP_NAVI10 ||
+ sscreen->info.family == CHIP_NAVI12 ||
+ sscreen->info.family == CHIP_NAVI14) &&
+ (es_type == PIPE_SHADER_VERTEX || gs_type == PIPE_SHADER_VERTEX) && /* = no tess */
+ shader->ngg.hw_max_esverts != 256) {
+ shader->ge_cntl &= C_03096C_VERT_GRP_SIZE;
+
+ if (shader->ngg.hw_max_esverts > 5) {
+ shader->ge_cntl |=
+ S_03096C_VERT_GRP_SIZE(shader->ngg.hw_max_esverts - 5);
+ }
+ }
+
+ if (window_space) {
+ shader->ctx_reg.ngg.pa_cl_vte_cntl =
+ S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1);
+ } else {
+ shader->ctx_reg.ngg.pa_cl_vte_cntl =
+ S_028818_VTX_W0_FMT(1) |
+ S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
+ S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
+ S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1);
+ }
+}
+
static void si_emit_shader_vs(struct si_context *sctx)
{
struct si_shader *shader = sctx->queued.named.vs->shader;
if (initial_cdw != sctx->gfx_cs->current.cdw)
sctx->context_roll = true;
+
+ /* Required programming for tessellation. (legacy pipeline only) */
+ if (sctx->chip_class == GFX10 &&
+ shader->selector->type == PIPE_SHADER_TESS_EVAL) {
+ radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
+ SI_TRACKED_VGT_GS_ONCHIP_CNTL,
+ S_028A44_ES_VERTS_PER_SUBGRP(250) |
+ S_028A44_GS_PRIMS_PER_SUBGRP(126) |
+ S_028A44_GS_INST_PRIMS_IN_SUBGRP(126));
+ }
+
+ if (sctx->chip_class >= GFX10) {
+ radeon_opt_set_context_reg_rmw(sctx, R_02881C_PA_CL_VS_OUT_CNTL,
+ SI_TRACKED_PA_CL_VS_OUT_CNTL__VS,
+ shader->pa_cl_vs_out_cntl,
+ SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK);
+ }
}
/**
vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
- /* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
- * If PrimID is disabled. InstanceID / StepRate1 is loaded instead.
- * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded.
- */
- vgpr_comp_cnt = enable_prim_id ? 2 : (shader->info.uses_instanceid ? 1 : 0);
+ vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, enable_prim_id);
- if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
+ if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
- info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
+ info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
} else {
num_user_sgprs = si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR);
}
nparams = MAX2(shader->info.nr_param_exports, 1);
shader->ctx_reg.vs.spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);
+ if (sscreen->info.chip_class >= GFX10) {
+ shader->ctx_reg.vs.spi_vs_out_config |=
+ S_0286C4_NO_PC_EXPORT(shader->info.nr_param_exports == 0);
+ }
+
shader->ctx_reg.vs.spi_shader_pos_format =
S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE);
+ shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(shader->selector, false);
oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;
si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, S_00B124_MEM_BASE(va >> 40));
- si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
- S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) |
- S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8) |
- S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
- S_00B128_DX10_CLAMP(1) |
- S_00B128_FLOAT_MODE(shader->config.float_mode));
- si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
- S_00B12C_USER_SGPR(num_user_sgprs) |
- S_00B12C_OC_LDS_EN(oc_lds_en) |
- S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
- S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
- S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
- S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
- S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
- S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+
+ uint32_t rsrc1 = S_00B128_VGPRS((shader->config.num_vgprs - 1) /
+ (sscreen->ge_wave_size == 32 ? 8 : 4)) |
+ S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
+ S_00B128_DX10_CLAMP(1) |
+ S_00B128_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
+ S_00B128_FLOAT_MODE(shader->config.float_mode);
+ uint32_t rsrc2 = S_00B12C_USER_SGPR(num_user_sgprs) |
+ S_00B12C_OC_LDS_EN(oc_lds_en) |
+ S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
+
+ if (sscreen->info.chip_class <= GFX9)
+ rsrc1 |= S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8);
+
+ if (!sscreen->use_ngg_streamout) {
+ rsrc2 |= S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
+ S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
+ S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
+ S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
+ S_00B12C_SO_EN(!!shader->selector->so.num_outputs);
+ }
+
+ si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS, rsrc1);
+ si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS, rsrc2);
if (window_space)
shader->ctx_reg.vs.pa_cl_vte_cntl =
sctx->context_roll = true;
}
-static void si_shader_ps(struct si_shader *shader)
+static void si_shader_ps(struct si_screen *sscreen, struct si_shader *shader)
{
struct tgsi_shader_info *info = &shader->selector->info;
struct si_pm4_state *pm4;
* stalls without this setting.
*
* Don't add this to CB_SHADER_MASK.
+ *
+ * GFX10 supports pixel shaders without exports by setting both
+ * the color and Z formats to SPI_SHADER_ZERO. The hw will skip export
+ * instructions if any are present.
*/
- if (!spi_shader_col_format &&
+ if ((sscreen->info.chip_class <= GFX9 ||
+ info->uses_kill ||
+ shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS) &&
+ !spi_shader_col_format &&
!info->writes_z && !info->writes_stencil && !info->writes_samplemask)
spi_shader_col_format = V_028714_SPI_SHADER_32_R;
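+ /* So on GFX10 the dummy 32_R export above is only added when kill or
+ * alpha-test is used. */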
shader->ctx_reg.ps.spi_ps_input_addr = shader->config.spi_ps_input_addr;
/* Set interpolation controls. */
- spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader));
+ spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader)) |
+ S_0286D8_PS_W32_EN(sscreen->ps_wave_size == 32);
shader->ctx_reg.ps.spi_baryc_cntl = spi_baryc_cntl;
shader->ctx_reg.ps.spi_ps_in_control = spi_ps_in_control;
si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, S_00B024_MEM_BASE(va >> 40));
- si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
- S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) |
- S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8) |
- S_00B028_DX10_CLAMP(1) |
- S_00B028_FLOAT_MODE(shader->config.float_mode));
+ uint32_t rsrc1 =
+ S_00B028_VGPRS((shader->config.num_vgprs - 1) /
+ (sscreen->ps_wave_size == 32 ? 8 : 4)) |
+ S_00B028_DX10_CLAMP(1) |
+ S_00B028_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
+ S_00B028_FLOAT_MODE(shader->config.float_mode);
+
+ if (sscreen->info.chip_class < GFX10) {
+ rsrc1 |= S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8);
+ }
+
+ si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS, rsrc1);
si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) |
S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) |
si_shader_ls(sscreen, shader);
else if (shader->key.as_es)
si_shader_es(sscreen, shader);
+ else if (shader->key.as_ngg)
+ gfx10_shader_ngg(sscreen, shader);
else
si_shader_vs(sscreen, shader, NULL);
break;
case PIPE_SHADER_TESS_EVAL:
if (shader->key.as_es)
si_shader_es(sscreen, shader);
+ else if (shader->key.as_ngg)
+ gfx10_shader_ngg(sscreen, shader);
else
si_shader_vs(sscreen, shader, NULL);
break;
case PIPE_SHADER_GEOMETRY:
- si_shader_gs(sscreen, shader);
+ if (shader->key.as_ngg)
+ gfx10_shader_ngg(sscreen, shader);
+ else
+ si_shader_gs(sscreen, shader);
break;
case PIPE_SHADER_FRAGMENT:
- si_shader_ps(shader);
+ si_shader_ps(sscreen, shader);
break;
default:
assert(0);
static unsigned si_get_alpha_test_func(struct si_context *sctx)
{
/* Alpha-test should be disabled if colorbuffer 0 is integer. */
- if (sctx->queued.named.dsa)
- return sctx->queued.named.dsa->alpha_func;
-
- return PIPE_FUNC_ALWAYS;
+ return sctx->queued.named.dsa->alpha_func;
}
void si_shader_selector_key_vs(struct si_context *sctx,
struct si_vs_prolog_bits *prolog_key)
{
if (!sctx->vertex_elements ||
- vs->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS])
+ vs->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD])
return;
struct si_vertex_elements *elts = sctx->vertex_elements;
/* Find out if PS is disabled. */
bool ps_disabled = true;
if (ps) {
- const struct si_state_blend *blend = sctx->queued.named.blend;
- bool alpha_to_coverage = blend && blend->alpha_to_coverage;
bool ps_modifies_zs = ps->info.uses_kill ||
ps->info.writes_z ||
ps->info.writes_stencil ||
ps->info.writes_samplemask ||
- alpha_to_coverage ||
+ sctx->queued.named.blend->alpha_to_coverage ||
si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS;
unsigned ps_colormask = si_get_total_colormask(sctx);
if (sctx->tes_shader.cso)
key->as_ls = 1;
- else if (sctx->gs_shader.cso)
+ else if (sctx->gs_shader.cso) {
key->as_es = 1;
- else {
+ key->as_ngg = stages_key.u.ngg;
+ } else {
key->as_ngg = stages_key.u.ngg;
si_shader_selector_key_hw_vs(sctx, sel, key);
key->mono.u.ff_tcs_inputs_to_copy = sctx->vs_shader.cso->outputs_written;
break;
case PIPE_SHADER_TESS_EVAL:
+ key->as_ngg = stages_key.u.ngg;
+
if (sctx->gs_shader.cso)
key->as_es = 1;
else {
- key->as_ngg = stages_key.u.ngg;
si_shader_selector_key_hw_vs(sctx, sel, key);
if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
sel->info.colors_written == 0x1)
key->part.ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
- if (blend) {
- /* Select the shader color format based on whether
- * blending or alpha are needed.
- */
- key->part.ps.epilog.spi_shader_col_format =
- (blend->blend_enable_4bit & blend->need_src_alpha_4bit &
- sctx->framebuffer.spi_shader_col_format_blend_alpha) |
- (blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
- sctx->framebuffer.spi_shader_col_format_blend) |
- (~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
- sctx->framebuffer.spi_shader_col_format_alpha) |
- (~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
- sctx->framebuffer.spi_shader_col_format);
- key->part.ps.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit;
-
- /* The output for dual source blending should have
- * the same format as the first output.
- */
- if (blend->dual_src_blend)
- key->part.ps.epilog.spi_shader_col_format |=
- (key->part.ps.epilog.spi_shader_col_format & 0xf) << 4;
- } else
- key->part.ps.epilog.spi_shader_col_format = sctx->framebuffer.spi_shader_col_format;
+ /* Select the shader color format based on whether
+ * blending or alpha are needed.
+ */
+ key->part.ps.epilog.spi_shader_col_format =
+ (blend->blend_enable_4bit & blend->need_src_alpha_4bit &
+ sctx->framebuffer.spi_shader_col_format_blend_alpha) |
+ (blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
+ sctx->framebuffer.spi_shader_col_format_blend) |
+ (~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
+ sctx->framebuffer.spi_shader_col_format_alpha) |
+ (~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
+ sctx->framebuffer.spi_shader_col_format);
+ key->part.ps.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit;
+
+ /* The output for dual source blending should have
+ * the same format as the first output.
+ */
+ if (blend->dual_src_blend) {
+ key->part.ps.epilog.spi_shader_col_format |=
+ (key->part.ps.epilog.spi_shader_col_format & 0xf) << 4;
+ }
/* If alpha-to-coverage is enabled, we have to export alpha
* even if there is no color buffer.
*/
if (!(key->part.ps.epilog.spi_shader_col_format & 0xf) &&
- blend && blend->alpha_to_coverage)
+ blend->alpha_to_coverage)
key->part.ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;
/* On GFX6 and GFX7 except Hawaii, the CB doesn't clamp outputs
key->part.ps.prolog.color_two_side = rs->two_side && sel->info.colors_read;
key->part.ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read;
- if (sctx->queued.named.blend) {
- key->part.ps.epilog.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
- rs->multisample_enable;
- }
+ key->part.ps.epilog.alpha_to_one = blend->alpha_to_one &&
+ rs->multisample_enable;
key->part.ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
key->part.ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
sel->info.uses_linear_centroid +
sel->info.uses_linear_sample > 1;
- if (sel->info.opcode_count[TGSI_OPCODE_INTERP_SAMPLE])
+ if (sel->info.uses_persp_opcode_interp_sample ||
+ sel->info.uses_linear_opcode_interp_sample)
key->mono.u.ps.interpolate_at_sample_force_center = 1;
}
FILE *f = open_memstream(&shader->shader_log,
&shader->shader_log_size);
if (f) {
- si_shader_dump(sscreen, shader, NULL, sel->type, f, false);
+ si_shader_dump(sscreen, shader, NULL, f, false);
fclose(f);
}
}
if (thread_index < 0)
util_queue_fence_wait(&sel->ready);
- mtx_lock(&sel->mutex);
+ simple_mtx_lock(&sel->mutex);
/* Find the shader variant. */
for (iter = sel->first_variant; iter; iter = iter->next_variant) {
/* Don't check the "current" shader. We checked it above. */
if (current != iter &&
memcmp(&iter->key, key, sizeof(*key)) == 0) {
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
/* If it's an optimized shader and its compilation has
/* Build a new shader. */
shader = CALLOC_STRUCT(si_shader);
if (!shader) {
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
return -ENOMEM;
}
if (previous_stage_sel) {
struct si_shader_key shader1_key = zeroed;
- if (sel->type == PIPE_SHADER_TESS_CTRL)
+ if (sel->type == PIPE_SHADER_TESS_CTRL) {
shader1_key.as_ls = 1;
- else if (sel->type == PIPE_SHADER_GEOMETRY)
+ } else if (sel->type == PIPE_SHADER_GEOMETRY) {
shader1_key.as_es = 1;
- else
+ shader1_key.as_ngg = key->as_ngg; /* for Wave32 vs Wave64 */
+ } else {
assert(0);
+ }
- mtx_lock(&previous_stage_sel->mutex);
+ simple_mtx_lock(&previous_stage_sel->mutex);
ok = si_check_missing_main_part(sscreen,
previous_stage_sel,
compiler_state, &shader1_key);
- mtx_unlock(&previous_stage_sel->mutex);
+ simple_mtx_unlock(&previous_stage_sel->mutex);
}
if (ok) {
if (!ok) {
FREE(shader);
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
return -ENOMEM; /* skip the draw call */
}
}
/* Compile it asynchronously. */
util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
shader, &shader->ready,
- si_build_shader_variant_low_priority, NULL);
+ si_build_shader_variant_low_priority, NULL,
+ 0);
/* Add only after the ready fence was reset, to guard against a
* race with si_bind_XX_shader. */
/* Use the default (unoptimized) shader for now. */
memset(&key->opt, 0, sizeof(key->opt));
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
if (sscreen->options.sync_compile)
util_queue_fence_wait(&shader->ready);
sel->last_variant = shader;
}
- mtx_unlock(&sel->mutex);
+ simple_mtx_unlock(&sel->mutex);
assert(!shader->is_optimized);
si_build_shader_variant(shader, thread_index, false);
assert(thread_index < ARRAY_SIZE(sscreen->compiler));
compiler = &sscreen->compiler[thread_index];
- if (sel->nir)
- si_lower_nir(sel);
-
/* Compile the main shader part for use with a prolog and/or epilog.
* If this fails, the driver will try to compile a monolithic shader
* on demand.
si_parse_next_shader_property(&sel->info,
sel->so.num_outputs != 0,
&shader->key);
- if (sscreen->info.chip_class >= GFX10 &&
- !sscreen->options.disable_ngg &&
- (((sel->type == PIPE_SHADER_VERTEX ||
- sel->type == PIPE_SHADER_TESS_EVAL) &&
- !shader->key.as_ls && !shader->key.as_es) ||
+
+ if (sscreen->use_ngg &&
+ (!sel->so.num_outputs || sscreen->use_ngg_streamout) &&
+ ((sel->type == PIPE_SHADER_VERTEX && !shader->key.as_ls) ||
+ sel->type == PIPE_SHADER_TESS_EVAL ||
sel->type == PIPE_SHADER_GEOMETRY))
shader->key.as_ngg = 1;
- if (sel->tokens || sel->nir)
- ir_binary = si_get_ir_binary(sel);
+ if (sel->tokens || sel->nir) {
+ ir_binary = si_get_ir_binary(sel, shader->key.as_ngg,
+ shader->key.as_es);
+ }
/* Try to load the shader from the shader cache. */
- mtx_lock(&sscreen->shader_cache_mutex);
+ simple_mtx_lock(&sscreen->shader_cache_mutex);
if (ir_binary &&
si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
- mtx_unlock(&sscreen->shader_cache_mutex);
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
} else {
- mtx_unlock(&sscreen->shader_cache_mutex);
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
/* Compile the shader if it hasn't been loaded from the cache. */
if (si_compile_tgsi_shader(sscreen, compiler, shader,
}
if (ir_binary) {
- mtx_lock(&sscreen->shader_cache_mutex);
+ simple_mtx_lock(&sscreen->shader_cache_mutex);
if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true))
FREE(ir_binary);
- mtx_unlock(&sscreen->shader_cache_mutex);
+ simple_mtx_unlock(&sscreen->shader_cache_mutex);
}
}
}
/* The GS copy shader is always pre-compiled. */
- if (sel->type == PIPE_SHADER_GEOMETRY) {
+ if (sel->type == PIPE_SHADER_GEOMETRY &&
+ (!sscreen->use_ngg ||
+ !sscreen->use_ngg_streamout || /* also for PRIMITIVES_GENERATED */
+ sel->tess_turns_off_ngg)) {
sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, compiler, sel, debug);
if (!sel->gs_copy_shader) {
fprintf(stderr, "radeonsi: can't create GS copy shader\n");
}
util_queue_add_job(&sctx->screen->shader_compiler_queue, job,
- ready_fence, execute, NULL);
+ ready_fence, execute, NULL, 0);
if (debug) {
util_queue_fence_wait(ready_fence);
uint32_t *const_and_shader_buffers,
uint64_t *samplers_and_images)
{
- unsigned start, num_shaderbufs, num_constbufs, num_images, num_samplers;
+ unsigned start, num_shaderbufs, num_constbufs, num_images, num_msaa_images, num_samplers;
num_shaderbufs = util_last_bit(info->shader_buffers_declared);
num_constbufs = util_last_bit(info->const_buffers_declared);
/* two 8-byte images share one 16-byte slot */
num_images = align(util_last_bit(info->images_declared), 2);
+ num_msaa_images = align(util_last_bit(info->msaa_images_declared), 2);
num_samplers = util_last_bit(info->samplers_declared);
/* The layout is: sb[last] ... sb[0], cb[0] ... cb[last] */
*const_and_shader_buffers =
u_bit_consecutive(start, num_shaderbufs + num_constbufs);
- /* The layout is: image[last] ... image[0], sampler[0] ... sampler[last] */
+ /* The layout is:
+ * - fmask[last] ... fmask[0] go to [15-last .. 15]
+ * - image[last] ... image[0] go to [31-last .. 31]
+ * - sampler[0] ... sampler[last] go to [32 .. 32+last*2]
+ *
+ * FMASKs for images are placed separately, because MSAA images are rare,
+ * and so we can benefit from a better cache hit rate if we keep image
+ * descriptors together.
+ */
+ if (num_msaa_images)
+ num_images = SI_NUM_IMAGES + num_msaa_images; /* add FMASK descriptors */
+
start = si_get_image_slot(num_images - 1) / 2;
*samplers_and_images =
u_bit_consecutive64(start, num_images / 2 + num_samplers);
sel->so = state->stream_output;
- if (state->type == PIPE_SHADER_IR_TGSI) {
+ if (state->type == PIPE_SHADER_IR_TGSI &&
+ !sscreen->options.enable_nir) {
sel->tokens = tgsi_dup_tokens(state->tokens);
if (!sel->tokens) {
FREE(sel);
tgsi_scan_shader(state->tokens, &sel->info);
tgsi_scan_tess_ctrl(state->tokens, &sel->info, &sel->tcs_info);
- } else {
- assert(state->type == PIPE_SHADER_IR_NIR);
- sel->nir = state->ir.nir;
+ /* Fixup for TGSI: Set which opcode uses which (i,j) pair. */
+ if (sel->info.uses_persp_opcode_interp_centroid)
+ sel->info.uses_persp_centroid = true;
+
+ if (sel->info.uses_linear_opcode_interp_centroid)
+ sel->info.uses_linear_centroid = true;
+
+ if (sel->info.uses_persp_opcode_interp_offset ||
+ sel->info.uses_persp_opcode_interp_sample)
+ sel->info.uses_persp_center = true;
+
+ if (sel->info.uses_linear_opcode_interp_offset ||
+ sel->info.uses_linear_opcode_interp_sample)
+ sel->info.uses_linear_center = true;
+ } else {
+ if (state->type == PIPE_SHADER_IR_TGSI) {
+ sel->nir = tgsi_to_nir(state->tokens, ctx->screen);
+ } else {
+ assert(state->type == PIPE_SHADER_IR_NIR);
+ sel->nir = state->ir.nir;
+ }
- si_nir_opts(sel->nir);
si_nir_scan_shader(sel->nir, &sel->info);
si_nir_scan_tess_ctrl(sel->nir, &sel->tcs_info);
+ si_nir_adjust_driver_locations(sel->nir);
}
sel->type = sel->info.processor;
/* The prolog is a no-op if there are no inputs. */
sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX &&
sel->info.num_inputs &&
- !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
+ !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD];
sel->force_correct_derivs_after_kill =
sel->type == PIPE_SHADER_FRAGMENT &&
!sel->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] &&
!sel->so.num_outputs;
- /* Set which opcode uses which (i,j) pair. */
- if (sel->info.uses_persp_opcode_interp_centroid)
- sel->info.uses_persp_centroid = true;
-
- if (sel->info.uses_linear_opcode_interp_centroid)
- sel->info.uses_linear_centroid = true;
-
- if (sel->info.uses_persp_opcode_interp_offset ||
- sel->info.uses_persp_opcode_interp_sample)
- sel->info.uses_persp_center = true;
-
- if (sel->info.uses_linear_opcode_interp_offset ||
- sel->info.uses_linear_opcode_interp_sample)
- sel->info.uses_linear_center = true;
-
switch (sel->type) {
case PIPE_SHADER_GEOMETRY:
sel->gs_output_prim =
sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
+
+ /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
+ sel->rast_prim = sel->gs_output_prim;
+ if (util_rast_prim_is_triangles(sel->rast_prim))
+ sel->rast_prim = PIPE_PRIM_TRIANGLES;
+
sel->gs_max_out_vertices =
sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
sel->gs_num_invocations =
sel->gs_input_verts_per_prim =
u_vertices_per_prim(sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]);
+
+ /* EN_MAX_VERT_OUT_PER_GS_INSTANCE does not work with tessellation. */
+ sel->tess_turns_off_ngg =
+ (sscreen->info.family == CHIP_NAVI10 ||
+ sscreen->info.family == CHIP_NAVI12 ||
+ sscreen->info.family == CHIP_NAVI14) &&
+ sel->gs_num_invocations * sel->gs_max_out_vertices > 256;
break;
case PIPE_SHADER_TESS_CTRL:
sel->esgs_itemsize += 4;
assert(((sel->esgs_itemsize / 4) & C_028AAC_ITEMSIZE) == 0);
+
+ /* Only for TES: */
+ if (sel->info.properties[TGSI_PROPERTY_TES_POINT_MODE])
+ sel->rast_prim = PIPE_PRIM_POINTS;
+ else if (sel->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
+ sel->rast_prim = PIPE_PRIM_LINE_STRIP;
+ else
+ sel->rast_prim = PIPE_PRIM_TRIANGLES;
break;
case PIPE_SHADER_FRAGMENT:
}
}
break;
+ default:;
}
/* PA_CL_VS_OUT_CNTL */
- bool misc_vec_ena =
- sel->info.writes_psize || sel->info.writes_edgeflag ||
- sel->info.writes_layer || sel->info.writes_viewport_index;
- sel->pa_cl_vs_out_cntl =
- S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) |
- S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag) |
- S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) |
- S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) |
- S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
- S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena);
+ if (sctx->chip_class <= GFX9)
+ sel->pa_cl_vs_out_cntl = si_get_vs_out_cntl(sel, false);
+
sel->clipdist_mask = sel->info.writes_clipvertex ?
SIX_BITS : sel->info.clipdist_writemask;
sel->culldist_mask = sel->info.culldist_writemask <<
sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
}
- (void) mtx_init(&sel->mutex, mtx_plain);
+ if (sel->info.properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE])
+ sel->db_shader_control |= S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(1);
+
+ (void) simple_mtx_init(&sel->mutex, mtx_plain);
si_schedule_initial_compile(sctx, sel->info.processor, &sel->ready,
&sel->compiler_ctx_state, sel,
sctx->vs_shader.cso = sel;
sctx->vs_shader.current = sel ? sel->first_variant : NULL;
- sctx->num_vs_blit_sgprs = sel ? sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS] : 0;
+ sctx->num_vs_blit_sgprs = sel ? sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] : 0;
+
+ if (si_update_ngg(sctx))
+ si_shader_change_notify(sctx);
si_update_common_shader_state(sctx);
si_update_vs_viewport_state(sctx);
sctx->ps_shader.cso->info.uses_primid);
}
-static bool si_update_ngg(struct si_context *sctx)
+bool si_update_ngg(struct si_context *sctx)
{
- if (sctx->chip_class <= GFX9 ||
- sctx->screen->options.disable_ngg)
+ if (!sctx->screen->use_ngg) {
+ assert(!sctx->ngg);
return false;
+ }
bool new_ngg = true;
- /* EN_MAX_VERT_OUT_PER_GS_INSTANCE does not work with tesselation. */
if (sctx->gs_shader.cso && sctx->tes_shader.cso &&
- sctx->gs_shader.cso->gs_num_invocations * sctx->gs_shader.cso->gs_max_out_vertices > 256)
+ sctx->gs_shader.cso->tess_turns_off_ngg) {
new_ngg = false;
+ } else if (!sctx->screen->use_ngg_streamout) {
+ struct si_shader_selector *last = si_get_vs(sctx)->cso;
+
+ if ((last && last->so.num_outputs) ||
+ sctx->streamout.prims_gen_query_enabled)
+ new_ngg = false;
+ }
if (new_ngg != sctx->ngg) {
+ /* Transitioning from NGG to legacy GS requires VGT_FLUSH on Navi10-14.
+ * VGT_FLUSH is also emitted at the beginning of IBs when legacy GS ring
+ * pointers are set.
+ */
+ if ((sctx->family == CHIP_NAVI10 ||
+ sctx->family == CHIP_NAVI12 ||
+ sctx->family == CHIP_NAVI14) &&
+ !new_ngg)
+ sctx->flags |= SI_CONTEXT_VGT_FLUSH;
+
sctx->ngg = new_ngg;
sctx->last_rast_prim = -1; /* reset this so that it gets updated */
return true;
si_update_common_shader_state(sctx);
sctx->last_rast_prim = -1; /* reset this so that it gets updated */
- if (enable_changed) {
- si_update_ngg(sctx);
+ bool ngg_changed = si_update_ngg(sctx);
+ if (ngg_changed || enable_changed)
si_shader_change_notify(sctx);
+ if (enable_changed)
sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
- }
si_update_vs_viewport_state(sctx);
si_set_active_descriptors_for_shader(sctx, sel);
si_update_streamout_state(sctx);
util_queue_fence_destroy(&shader->ready);
if (shader->pm4) {
+ /* If destroyed shaders were not unbound, the next compiled
+ * shader variant could get the same pointer address and so
+ * binding it to the same shader stage would be considered
+ * a no-op, causing random behavior.
+ */
switch (shader->selector->type) {
case PIPE_SHADER_VERTEX:
if (shader->key.as_ls) {
} else if (shader->key.as_es) {
assert(sctx->chip_class <= GFX8);
si_pm4_delete_state(sctx, es, shader->pm4);
+ } else if (shader->key.as_ngg) {
+ si_pm4_delete_state(sctx, gs, shader->pm4);
} else {
si_pm4_delete_state(sctx, vs, shader->pm4);
}
if (shader->key.as_es) {
assert(sctx->chip_class <= GFX8);
si_pm4_delete_state(sctx, es, shader->pm4);
+ } else if (shader->key.as_ngg) {
+ si_pm4_delete_state(sctx, gs, shader->pm4);
} else {
si_pm4_delete_state(sctx, vs, shader->pm4);
}
case PIPE_SHADER_FRAGMENT:
si_pm4_delete_state(sctx, ps, shader->pm4);
break;
+ default:;
}
}
si_delete_shader(sctx, sel->gs_copy_shader);
util_queue_fence_destroy(&sel->ready);
- mtx_destroy(&sel->mutex);
+ simple_mtx_destroy(&sel->mutex);
free(sel->tokens);
ralloc_free(sel->nir);
free(sel);
}
}
- if (name == TGSI_SEMANTIC_PRIMID)
- /* PrimID is written after the last output. */
+ if (j == vsinfo->num_outputs && name == TGSI_SEMANTIC_PRIMID)
+ /* PrimID is written after the last output when HW VS is used. */
ps_input_cntl |= S_028644_OFFSET(vs->info.vs_output_param_offset[vsinfo->num_outputs]);
else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
/* No corresponding output found, load defaults into input.
pipe_aligned_buffer_create(sctx->b.screen,
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
- esgs_ring_size, alignment);
+ esgs_ring_size,
+ sctx->screen->info.pte_fragment_size);
if (!sctx->esgs_ring)
return false;
}
pipe_aligned_buffer_create(sctx->b.screen,
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
- gsvs_ring_size, alignment);
+ gsvs_ring_size,
+ sctx->screen->info.pte_fragment_size);
if (!sctx->gsvs_ring)
return false;
}
static void si_shader_lock(struct si_shader *shader)
{
- mtx_lock(&shader->selector->mutex);
+ simple_mtx_lock(&shader->selector->mutex);
if (shader->previous_stage_sel) {
assert(shader->previous_stage_sel != shader->selector);
- mtx_lock(&shader->previous_stage_sel->mutex);
+ simple_mtx_lock(&shader->previous_stage_sel->mutex);
}
}
static void si_shader_unlock(struct si_shader *shader)
{
if (shader->previous_stage_sel)
- mtx_unlock(&shader->previous_stage_sel->mutex);
- mtx_unlock(&shader->selector->mutex);
+ simple_mtx_unlock(&shader->previous_stage_sel->mutex);
+ simple_mtx_unlock(&shader->selector->mutex);
}
/**
return 1;
}
-static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
-{
- return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0;
-}
-
static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
{
return shader ? shader->config.scratch_bytes_per_wave : 0;
sctx->fixed_func_tcs_shader.current;
}
-static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
-{
- unsigned bytes = 0;
-
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
-
- if (sctx->tes_shader.cso) {
- struct si_shader *tcs = si_get_tcs_current(sctx);
-
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(tcs));
- }
- return bytes;
-}
-
static bool si_update_scratch_relocs(struct si_context *sctx)
{
struct si_shader *tcs = si_get_tcs_current(sctx);
if (r < 0)
return false;
if (r == 1) {
- if (sctx->tes_shader.current)
+ if (sctx->vs_shader.current->key.as_ls)
si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
- else if (sctx->gs_shader.current)
+ else if (sctx->vs_shader.current->key.as_es)
si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
+ else if (sctx->vs_shader.current->key.as_ngg)
+ si_pm4_bind_state(sctx, gs, sctx->vs_shader.current->pm4);
else
si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
}
if (r < 0)
return false;
if (r == 1) {
- if (sctx->gs_shader.current)
+ if (sctx->tes_shader.current->key.as_es)
si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
+ else if (sctx->tes_shader.current->key.as_ngg)
+ si_pm4_bind_state(sctx, gs, sctx->tes_shader.current->pm4);
else
si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
}
static bool si_update_spi_tmpring_size(struct si_context *sctx)
{
- unsigned current_scratch_buffer_size =
- si_get_current_scratch_buffer_size(sctx);
- unsigned scratch_bytes_per_wave =
- si_get_max_scratch_bytes_per_wave(sctx);
- unsigned scratch_needed_size = scratch_bytes_per_wave *
- sctx->scratch_waves;
+ /* SPI_TMPRING_SIZE.WAVESIZE must be constant for each scratch buffer.
+ * There are 2 cases to handle:
+ *
+ * - If the current needed size is less than the maximum seen size,
+ * use the maximum seen size, so that WAVESIZE remains the same.
+ *
+ * - If the current needed size is greater than the maximum seen size,
+ * the scratch buffer is reallocated, so we can increase WAVESIZE.
+ *
+ * Shaders that set SCRATCH_EN=0 don't allocate scratch space.
+ * Otherwise, the number of waves that can use scratch is
+ * SPI_TMPRING_SIZE.WAVES.
+ */
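+ /* E.g. once a variant needing 8192 bytes per wave has been seen, WAVESIZE
+ * stays at 8 (units of 1024 bytes, hence the >> 10 below) even if the
+ * currently bound shaders need less.
+ */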
+ unsigned bytes = 0;
+
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
+
+ if (sctx->tes_shader.cso) {
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(si_get_tcs_current(sctx)));
+ }
+
+ sctx->max_seen_scratch_bytes_per_wave =
+ MAX2(sctx->max_seen_scratch_bytes_per_wave, bytes);
+
+ unsigned scratch_needed_size =
+ sctx->max_seen_scratch_bytes_per_wave * sctx->scratch_waves;
unsigned spi_tmpring_size;
if (scratch_needed_size > 0) {
- if (scratch_needed_size > current_scratch_buffer_size) {
+ if (!sctx->scratch_buffer ||
+ scratch_needed_size > sctx->scratch_buffer->b.b.width0) {
/* Create a bigger scratch buffer */
si_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer =
si_aligned_buffer_create(&sctx->screen->b,
- SI_RESOURCE_FLAG_UNMAPPABLE,
- PIPE_USAGE_DEFAULT,
- scratch_needed_size, 256);
+ SI_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ scratch_needed_size,
+ sctx->screen->info.pte_fragment_size);
if (!sctx->scratch_buffer)
return false;
"scratch size should already be aligned correctly.");
spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
- S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
+ S_0286E8_WAVESIZE(sctx->max_seen_scratch_bytes_per_wave >> 10);
if (spi_tmpring_size != sctx->spi_tmpring_size) {
sctx->spi_tmpring_size = spi_tmpring_size;
si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state);
static void si_init_tess_factor_ring(struct si_context *sctx)
{
assert(!sctx->tess_rings);
+ assert(((sctx->screen->tess_factor_ring_size / 4) & C_030938_SIZE) == 0);
/* The address must be aligned to 2^19, because the shader only
* receives the high 13 bits.
S_030938_SIZE(sctx->screen->tess_factor_ring_size / 4));
si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
factor_va >> 8);
- if (sctx->chip_class >= GFX9)
+ if (sctx->chip_class >= GFX10)
+ si_pm4_set_reg(sctx->init_config, R_030984_VGT_TF_MEMORY_BASE_HI_UMD,
+ S_030984_BASE_HI(factor_va >> 40));
+ else if (sctx->chip_class == GFX9)
si_pm4_set_reg(sctx->init_config, R_030944_VGT_TF_MEMORY_BASE_HI,
S_030944_BASE_HI(factor_va >> 40));
si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
if (screen->info.chip_class >= GFX9)
stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
+ if (screen->info.chip_class >= GFX10 && screen->ge_wave_size == 32) {
+ stages |= S_028B54_HS_W32_EN(1) |
+ S_028B54_GS_W32_EN(key.u.ngg) | /* legacy GS only supports Wave64 */
+ S_028B54_VS_W32_EN(1);
+ }
+
si_pm4_set_reg(pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
return pm4;
}
if (sctx->gs_shader.cso)
key.u.gs = 1;
- if (sctx->chip_class >= GFX10) {
- key.u.ngg = sctx->ngg;
-
- if (sctx->gs_shader.cso)
- key.u.streamout = !!sctx->gs_shader.cso->so.num_outputs;
- else if (sctx->tes_shader.cso)
- key.u.streamout = !!sctx->tes_shader.cso->so.num_outputs;
- else
- key.u.streamout = !!sctx->vs_shader.cso->so.num_outputs;
+ if (sctx->ngg) {
+ key.u.ngg = 1;
+ key.u.streamout = !!si_get_vs(sctx)->cso->so.num_outputs;
}
/* Update TCS and TES. */
sctx->ps_shader.cso->db_shader_control |
S_02880C_KILL_ENABLE(si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS);
- if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
+ if (si_pm4_state_changed(sctx, ps) ||
+ si_pm4_state_changed(sctx, vs) ||
+ (key.u.ngg && si_pm4_state_changed(sctx, gs)) ||
sctx->sprite_coord_enable != rs->sprite_coord_enable ||
sctx->flatshade != rs->flatshade) {
sctx->sprite_coord_enable = rs->sprite_coord_enable;
si_mark_atom_dirty(sctx, &sctx->atoms.s.spi_map);
}
- if (sctx->screen->rbplus_allowed &&
+ if (sctx->screen->info.rbplus_allowed &&
si_pm4_state_changed(sctx, ps) &&
(!old_ps ||
old_spi_shader_col_format !=