diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index de33d250301..6e77ca9fc59 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.c
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.c
@@ -23,11 +23,10 @@
  */
 
 #include "si_build_pm4.h"
-#include "gfx9d.h"
+#include "sid.h"
 
 #include "compiler/nir/nir_serialize.h"
 #include "tgsi/tgsi_parse.h"
-#include "tgsi/tgsi_ureg.h"
 #include "util/hash_table.h"
 #include "util/crc32.h"
 #include "util/u_async_debug.h"
@@ -128,21 +127,21 @@ static uint32_t *read_chunk(uint32_t *ptr, void **data, unsigned *size)
 static void *si_get_shader_binary(struct si_shader *shader)
 {
 	/* There is always a size of data followed by the data itself. */
-	unsigned relocs_size = shader->binary.reloc_count *
-			       sizeof(shader->binary.relocs[0]);
-	unsigned disasm_size = shader->binary.disasm_string ?
-			       strlen(shader->binary.disasm_string) + 1 : 0;
 	unsigned llvm_ir_size = shader->binary.llvm_ir_string ?
 				strlen(shader->binary.llvm_ir_string) + 1 : 0;
+
+	/* Refuse to allocate overly large buffers and guard against integer
+	 * overflow. */
+	if (shader->binary.elf_size > UINT_MAX / 4 ||
+	    llvm_ir_size > UINT_MAX / 4)
+		return NULL;
+
 	unsigned size =
 		4 + /* total size */
 		4 + /* CRC32 of the data below */
 		align(sizeof(shader->config), 4) +
 		align(sizeof(shader->info), 4) +
-		4 + align(shader->binary.code_size, 4) +
-		4 + align(shader->binary.rodata_size, 4) +
-		4 + align(relocs_size, 4) +
-		4 + align(disasm_size, 4) +
+		4 + align(shader->binary.elf_size, 4) +
 		4 + align(llvm_ir_size, 4);
 	void *buffer = CALLOC(1, size);
 	uint32_t *ptr = (uint32_t*)buffer;
@@ -155,10 +154,7 @@ static void *si_get_shader_binary(struct si_shader *shader)
 
 	ptr = write_data(ptr, &shader->config, sizeof(shader->config));
 	ptr = write_data(ptr, &shader->info, sizeof(shader->info));
-	ptr = write_chunk(ptr, shader->binary.code, shader->binary.code_size);
-	ptr = write_chunk(ptr, shader->binary.rodata, shader->binary.rodata_size);
-	ptr = write_chunk(ptr, shader->binary.relocs, relocs_size);
-	ptr = write_chunk(ptr, shader->binary.disasm_string, disasm_size);
+	ptr = write_chunk(ptr, shader->binary.elf_buffer, shader->binary.elf_size);
 	ptr = write_chunk(ptr, shader->binary.llvm_ir_string, llvm_ir_size);
 	assert((char *)ptr - (char *)buffer == size);
 
@@ -176,6 +172,7 @@ static bool si_load_shader_binary(struct si_shader *shader, void *binary)
 	uint32_t size = *ptr++;
 	uint32_t crc32 = *ptr++;
 	unsigned chunk_size;
+	unsigned elf_size;
 
 	if (util_hash_crc32(ptr, size - 8) != crc32) {
 		fprintf(stderr, "radeonsi: binary shader has invalid CRC32\n");
@@ -184,13 +181,9 @@ static bool si_load_shader_binary(struct si_shader *shader, void *binary)
 
 	ptr = read_data(ptr, &shader->config, sizeof(shader->config));
 	ptr = read_data(ptr, &shader->info, sizeof(shader->info));
-	ptr = read_chunk(ptr, (void**)&shader->binary.code,
-			 &shader->binary.code_size);
-	ptr = read_chunk(ptr, (void**)&shader->binary.rodata,
-			 &shader->binary.rodata_size);
-	ptr = read_chunk(ptr, (void**)&shader->binary.relocs, &chunk_size);
-	shader->binary.reloc_count = chunk_size / sizeof(shader->binary.relocs[0]);
-	ptr = read_chunk(ptr, (void**)&shader->binary.disasm_string, &chunk_size);
+	ptr = 
read_chunk(ptr, (void**)&shader->binary.elf_buffer, + &elf_size); + shader->binary.elf_size = elf_size; ptr = read_chunk(ptr, (void**)&shader->binary.llvm_ir_string, &chunk_size); return true; @@ -338,10 +331,10 @@ void si_destroy_shader_cache(struct si_screen *sscreen) /* SHADER STATES */ static void si_set_tesseval_regs(struct si_screen *sscreen, - struct si_shader_selector *tes, + const struct si_shader_selector *tes, struct si_pm4_state *pm4) { - struct tgsi_shader_info *info = &tes->info; + const struct tgsi_shader_info *info = &tes->info; unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE]; unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING]; bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW]; @@ -397,11 +390,11 @@ static void si_set_tesseval_regs(struct si_screen *sscreen, } else distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST; - si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM, - S_028B6C_TYPE(type) | - S_028B6C_PARTITIONING(partitioning) | - S_028B6C_TOPOLOGY(topology) | - S_028B6C_DISTRIBUTION_MODE(distribution_mode)); + assert(pm4->shader); + pm4->shader->vgt_tf_param = S_028B6C_TYPE(type) | + S_028B6C_PARTITIONING(partitioning) | + S_028B6C_TOPOLOGY(topology) | + S_028B6C_DISTRIBUTION_MODE(distribution_mode); } /* Polaris needs different VTX_REUSE_DEPTH settings depending on @@ -425,7 +418,8 @@ static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen, { unsigned type = sel->type; - if (sscreen->info.family < CHIP_POLARIS10) + if (sscreen->info.family < CHIP_POLARIS10 || + sscreen->info.chip_class >= GFX10) return; /* VS as VS, or VS as ES: */ @@ -441,8 +435,8 @@ static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen, PIPE_TESS_SPACING_FRACTIONAL_ODD) vtx_reuse_depth = 14; - si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, - vtx_reuse_depth); + assert(pm4->shader); + pm4->shader->vgt_vertex_reuse_block_cntl = vtx_reuse_depth; } } @@ -453,18 +447,19 @@ static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader) else shader->pm4 = CALLOC_STRUCT(si_pm4_state); - return shader->pm4; + if (shader->pm4) { + shader->pm4->shader = shader; + return shader->pm4; + } else { + fprintf(stderr, "radeonsi: Failed to create pm4 state.\n"); + return NULL; + } } static unsigned si_get_num_vs_user_sgprs(unsigned num_always_on_user_sgprs) { /* Add the pointer to VBO descriptors. 
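	 * With user-data pointers now always 32 bits wide, this costs a
	 * single SGPR; the removed branch below is the old 64-bit case,
	 * which needed two SGPRs starting at an even index.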
*/ - if (HAVE_32BIT_POINTERS) { - return num_always_on_user_sgprs + 1; - } else { - assert(num_always_on_user_sgprs % 2 == 0); - return num_always_on_user_sgprs + 2; - } + return num_always_on_user_sgprs + 1; } static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader) @@ -473,7 +468,7 @@ static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader) unsigned vgpr_comp_cnt; uint64_t va; - assert(sscreen->info.chip_class <= VI); + assert(sscreen->info.chip_class <= GFX8); pm4 = si_get_shader_pm4_state(shader); if (!pm4) @@ -514,22 +509,39 @@ static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader) si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY); if (sscreen->info.chip_class >= GFX9) { - si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8); - si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, S_00B414_MEM_BASE(va >> 40)); + if (sscreen->info.chip_class >= GFX10) { + si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8); + si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40)); + } else { + si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8); + si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, S_00B414_MEM_BASE(va >> 40)); + } /* We need at least 2 components for LS. - * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID). - * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded. + * GFX9 VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID). + * GFX10 VGPR0-3: (VertexID, RelAutoindex, UserVGPR1, InstanceID). + * On gfx9, StepRate0 is set to 1 so that VGPR3 doesn't have to + * be loaded. */ - ls_vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1; + ls_vgpr_comp_cnt = 1; + if (shader->info.uses_instanceid) { + if (sscreen->info.chip_class >= GFX10) + ls_vgpr_comp_cnt = 3; + else + ls_vgpr_comp_cnt = 2; + } unsigned num_user_sgprs = si_get_num_vs_user_sgprs(GFX9_TCS_NUM_USER_SGPR); shader->config.rsrc2 = S_00B42C_USER_SGPR(num_user_sgprs) | - S_00B42C_USER_SGPR_MSB(num_user_sgprs >> 5) | S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0); + + if (sscreen->info.chip_class >= GFX10) + shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5); + else + shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5); } else { si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8); si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, S_00B424_MEM_BASE(va >> 40)); @@ -542,17 +554,45 @@ static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader) si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS, S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) | - S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) | + (sscreen->info.chip_class <= GFX9 ? 
+ S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) : 0) | S_00B428_DX10_CLAMP(1) | + S_00B428_MEM_ORDERED(sscreen->info.chip_class >= GFX10) | S_00B428_FLOAT_MODE(shader->config.float_mode) | S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt)); - if (sscreen->info.chip_class <= VI) { + if (sscreen->info.chip_class <= GFX8) { si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, shader->config.rsrc2); } } +static void si_emit_shader_es(struct si_context *sctx) +{ + struct si_shader *shader = sctx->queued.named.es->shader; + unsigned initial_cdw = sctx->gfx_cs->current.cdw; + + if (!shader) + return; + + radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE, + SI_TRACKED_VGT_ESGS_RING_ITEMSIZE, + shader->selector->esgs_itemsize / 4); + + if (shader->selector->type == PIPE_SHADER_TESS_EVAL) + radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, + SI_TRACKED_VGT_TF_PARAM, + shader->vgt_tf_param); + + if (shader->vgt_vertex_reuse_block_cntl) + radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, + SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL, + shader->vgt_vertex_reuse_block_cntl); + + if (initial_cdw != sctx->gfx_cs->current.cdw) + sctx->context_roll = true; +} + static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader) { struct si_pm4_state *pm4; @@ -561,12 +601,13 @@ static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader) uint64_t va; unsigned oc_lds_en; - assert(sscreen->info.chip_class <= VI); + assert(sscreen->info.chip_class <= GFX8); pm4 = si_get_shader_pm4_state(shader); if (!pm4) return; + pm4->atom.emit = si_emit_shader_es; va = shader->bo->gpu_address; si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY); @@ -582,8 +623,6 @@ static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader) oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 
1 : 0; - si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE, - shader->selector->esgs_itemsize / 4); si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8); si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(va >> 40)); si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES, @@ -603,41 +642,9 @@ static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader) polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4); } -static unsigned si_conv_prim_to_gs_out(unsigned mode) -{ - static const int prim_conv[] = { - [PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST, - [PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP, - [PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP, - [PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP, - [PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP, - [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP, - [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST, - }; - assert(mode < ARRAY_SIZE(prim_conv)); - - return prim_conv[mode]; -} - -struct gfx9_gs_info { - unsigned es_verts_per_subgroup; - unsigned gs_prims_per_subgroup; - unsigned gs_inst_prims_in_subgroup; - unsigned max_prims_per_subgroup; - unsigned lds_size; -}; - -static void gfx9_get_gs_info(struct si_shader_selector *es, - struct si_shader_selector *gs, - struct gfx9_gs_info *out) +void gfx9_get_gs_info(struct si_shader_selector *es, + struct si_shader_selector *gs, + struct gfx9_gs_info *out) { unsigned gs_num_invocations = MAX2(gs->gs_num_invocations, 1); unsigned input_prim = gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]; @@ -658,8 +665,6 @@ static void gfx9_get_gs_info(struct si_shader_selector *es, unsigned max_gs_prims, gs_prims; unsigned min_es_verts, es_verts, worst_case_es_verts; - assert(gs_num_invocations <= 32); /* GL maximum */ - if (uses_adjacency || gs_num_invocations > 1) max_gs_prims = 127 / gs_num_invocations; else @@ -730,11 +735,79 @@ static void gfx9_get_gs_info(struct si_shader_selector *es, out->gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations; out->max_prims_per_subgroup = out->gs_inst_prims_in_subgroup * gs->gs_max_out_vertices; - out->lds_size = align(esgs_lds_size, 128) / 128; + out->esgs_ring_size = 4 * esgs_lds_size; assert(out->max_prims_per_subgroup <= max_out_prims); } +static void si_emit_shader_gs(struct si_context *sctx) +{ + struct si_shader *shader = sctx->queued.named.gs->shader; + unsigned initial_cdw = sctx->gfx_cs->current.cdw; + + if (!shader) + return; + + /* R_028A60_VGT_GSVS_RING_OFFSET_1, R_028A64_VGT_GSVS_RING_OFFSET_2 + * R_028A68_VGT_GSVS_RING_OFFSET_3 */ + radeon_opt_set_context_reg3(sctx, R_028A60_VGT_GSVS_RING_OFFSET_1, + SI_TRACKED_VGT_GSVS_RING_OFFSET_1, + shader->ctx_reg.gs.vgt_gsvs_ring_offset_1, + shader->ctx_reg.gs.vgt_gsvs_ring_offset_2, + shader->ctx_reg.gs.vgt_gsvs_ring_offset_3); + + /* R_028AB0_VGT_GSVS_RING_ITEMSIZE */ + radeon_opt_set_context_reg(sctx, R_028AB0_VGT_GSVS_RING_ITEMSIZE, + SI_TRACKED_VGT_GSVS_RING_ITEMSIZE, + 
shader->ctx_reg.gs.vgt_gsvs_ring_itemsize); + + /* R_028B38_VGT_GS_MAX_VERT_OUT */ + radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT, + SI_TRACKED_VGT_GS_MAX_VERT_OUT, + shader->ctx_reg.gs.vgt_gs_max_vert_out); + + /* R_028B5C_VGT_GS_VERT_ITEMSIZE, R_028B60_VGT_GS_VERT_ITEMSIZE_1 + * R_028B64_VGT_GS_VERT_ITEMSIZE_2, R_028B68_VGT_GS_VERT_ITEMSIZE_3 */ + radeon_opt_set_context_reg4(sctx, R_028B5C_VGT_GS_VERT_ITEMSIZE, + SI_TRACKED_VGT_GS_VERT_ITEMSIZE, + shader->ctx_reg.gs.vgt_gs_vert_itemsize, + shader->ctx_reg.gs.vgt_gs_vert_itemsize_1, + shader->ctx_reg.gs.vgt_gs_vert_itemsize_2, + shader->ctx_reg.gs.vgt_gs_vert_itemsize_3); + + /* R_028B90_VGT_GS_INSTANCE_CNT */ + radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT, + SI_TRACKED_VGT_GS_INSTANCE_CNT, + shader->ctx_reg.gs.vgt_gs_instance_cnt); + + if (sctx->chip_class >= GFX9) { + /* R_028A44_VGT_GS_ONCHIP_CNTL */ + radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL, + SI_TRACKED_VGT_GS_ONCHIP_CNTL, + shader->ctx_reg.gs.vgt_gs_onchip_cntl); + /* R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP */ + radeon_opt_set_context_reg(sctx, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, + SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP, + shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup); + /* R_028AAC_VGT_ESGS_RING_ITEMSIZE */ + radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE, + SI_TRACKED_VGT_ESGS_RING_ITEMSIZE, + shader->ctx_reg.gs.vgt_esgs_ring_itemsize); + + if (shader->key.part.gs.es->type == PIPE_SHADER_TESS_EVAL) + radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, + SI_TRACKED_VGT_TF_PARAM, + shader->vgt_tf_param); + if (shader->vgt_vertex_reuse_block_cntl) + radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, + SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL, + shader->vgt_vertex_reuse_block_cntl); + } + + if (initial_cdw != sctx->gfx_cs->current.cdw) + sctx->context_roll = true; +} + static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader) { struct si_shader_selector *sel = shader->selector; @@ -749,33 +822,35 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader) if (!pm4) return; + pm4->atom.emit = si_emit_shader_gs; + offset = num_components[0] * sel->gs_max_out_vertices; - si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, offset); + shader->ctx_reg.gs.vgt_gsvs_ring_offset_1 = offset; + if (max_stream >= 1) offset += num_components[1] * sel->gs_max_out_vertices; - si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, offset); + shader->ctx_reg.gs.vgt_gsvs_ring_offset_2 = offset; + if (max_stream >= 2) offset += num_components[2] * sel->gs_max_out_vertices; - si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, offset); - si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE, - si_conv_prim_to_gs_out(sel->gs_output_prim)); + shader->ctx_reg.gs.vgt_gsvs_ring_offset_3 = offset; + if (max_stream >= 3) offset += num_components[3] * sel->gs_max_out_vertices; - si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset); + shader->ctx_reg.gs.vgt_gsvs_ring_itemsize = offset; /* The GSVS_RING_ITEMSIZE register takes 15 bits */ assert(offset < (1 << 15)); - si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, sel->gs_max_out_vertices); + shader->ctx_reg.gs.vgt_gs_max_vert_out = sel->gs_max_out_vertices; - si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, num_components[0]); - si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? num_components[1] : 0); - si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? 
num_components[2] : 0); - si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? num_components[3] : 0); + shader->ctx_reg.gs.vgt_gs_vert_itemsize = num_components[0]; + shader->ctx_reg.gs.vgt_gs_vert_itemsize_1 = (max_stream >= 1) ? num_components[1] : 0; + shader->ctx_reg.gs.vgt_gs_vert_itemsize_2 = (max_stream >= 2) ? num_components[2] : 0; + shader->ctx_reg.gs.vgt_gs_vert_itemsize_3 = (max_stream >= 3) ? num_components[3] : 0; - si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT, - S_028B90_CNT(MIN2(gs_num_invocations, 127)) | - S_028B90_ENABLE(gs_num_invocations > 0)); + shader->ctx_reg.gs.vgt_gs_instance_cnt = S_028B90_CNT(MIN2(gs_num_invocations, 127)) | + S_028B90_ENABLE(gs_num_invocations > 0); va = shader->bo->gpu_address; si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY); @@ -784,7 +859,6 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader) unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]; unsigned es_type = shader->key.part.gs.es->type; unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt; - struct gfx9_gs_info gs_info; if (es_type == PIPE_SHADER_VERTEX) /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */ @@ -812,33 +886,45 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader) else num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR; - gfx9_get_gs_info(shader->key.part.gs.es, sel, &gs_info); + if (sscreen->info.chip_class >= GFX10) { + si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8); + si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(va >> 40)); + } else { + si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8); + si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, S_00B214_MEM_BASE(va >> 40)); + } - si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8); - si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, S_00B214_MEM_BASE(va >> 40)); + uint32_t rsrc1 = + S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) | + S_00B228_DX10_CLAMP(1) | + S_00B228_MEM_ORDERED(sscreen->info.chip_class >= GFX10) | + S_00B228_FLOAT_MODE(shader->config.float_mode) | + S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt); + uint32_t rsrc2 = + S_00B22C_USER_SGPR(num_user_sgprs) | + S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) | + S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) | + S_00B22C_LDS_SIZE(shader->config.lds_size) | + S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0); + + if (sscreen->info.chip_class >= GFX10) { + rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5); + } else { + rsrc1 |= S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8); + rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5); + } - si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS, - S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) | - S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) | - S_00B228_DX10_CLAMP(1) | - S_00B228_FLOAT_MODE(shader->config.float_mode) | - S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt)); - si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS, - S_00B22C_USER_SGPR(num_user_sgprs) | - S_00B22C_USER_SGPR_MSB(num_user_sgprs >> 5) | - S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) | - S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) | - S_00B22C_LDS_SIZE(gs_info.lds_size) | - S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0)); + si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS, rsrc1); + si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS, rsrc2); - si_pm4_set_reg(pm4, R_028A44_VGT_GS_ONCHIP_CNTL, - 
S_028A44_ES_VERTS_PER_SUBGRP(gs_info.es_verts_per_subgroup) | - S_028A44_GS_PRIMS_PER_SUBGRP(gs_info.gs_prims_per_subgroup) | - S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_info.gs_inst_prims_in_subgroup)); - si_pm4_set_reg(pm4, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, - S_028A94_MAX_PRIMS_PER_SUBGROUP(gs_info.max_prims_per_subgroup)); - si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE, - shader->key.part.gs.es->esgs_itemsize / 4); + shader->ctx_reg.gs.vgt_gs_onchip_cntl = + S_028A44_ES_VERTS_PER_SUBGRP(shader->gs_info.es_verts_per_subgroup) | + S_028A44_GS_PRIMS_PER_SUBGRP(shader->gs_info.gs_prims_per_subgroup) | + S_028A44_GS_INST_PRIMS_IN_SUBGRP(shader->gs_info.gs_inst_prims_in_subgroup); + shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup = + S_028A94_MAX_PRIMS_PER_SUBGROUP(shader->gs_info.max_prims_per_subgroup); + shader->ctx_reg.gs.vgt_esgs_ring_itemsize = + shader->key.part.gs.es->esgs_itemsize / 4; if (es_type == PIPE_SHADER_TESS_EVAL) si_set_tesseval_regs(sscreen, shader->key.part.gs.es, pm4); @@ -860,6 +946,348 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader) } } +/* Common tail code for NGG primitive shaders. */ +static void gfx10_emit_shader_ngg_tail(struct si_context *sctx, + struct si_shader *shader, + unsigned initial_cdw) +{ + radeon_opt_set_context_reg(sctx, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP, + SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP, + shader->ctx_reg.ngg.ge_max_output_per_subgroup); + radeon_opt_set_context_reg(sctx, R_028B4C_GE_NGG_SUBGRP_CNTL, + SI_TRACKED_GE_NGG_SUBGRP_CNTL, + shader->ctx_reg.ngg.ge_ngg_subgrp_cntl); + radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN, + SI_TRACKED_VGT_PRIMITIVEID_EN, + shader->ctx_reg.ngg.vgt_primitiveid_en); + radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL, + SI_TRACKED_VGT_GS_ONCHIP_CNTL, + shader->ctx_reg.ngg.vgt_gs_onchip_cntl); + radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT, + SI_TRACKED_VGT_GS_INSTANCE_CNT, + shader->ctx_reg.ngg.vgt_gs_instance_cnt); + radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE, + SI_TRACKED_VGT_ESGS_RING_ITEMSIZE, + shader->ctx_reg.ngg.vgt_esgs_ring_itemsize); + radeon_opt_set_context_reg(sctx, R_028AB4_VGT_REUSE_OFF, + SI_TRACKED_VGT_REUSE_OFF, + shader->ctx_reg.ngg.vgt_reuse_off); + radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG, + SI_TRACKED_SPI_VS_OUT_CONFIG, + shader->ctx_reg.ngg.spi_vs_out_config); + radeon_opt_set_context_reg2(sctx, R_028708_SPI_SHADER_IDX_FORMAT, + SI_TRACKED_SPI_SHADER_IDX_FORMAT, + shader->ctx_reg.ngg.spi_shader_idx_format, + shader->ctx_reg.ngg.spi_shader_pos_format); + radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL, + SI_TRACKED_PA_CL_VTE_CNTL, + shader->ctx_reg.ngg.pa_cl_vte_cntl); + radeon_opt_set_context_reg(sctx, R_028838_PA_CL_NGG_CNTL, + SI_TRACKED_PA_CL_NGG_CNTL, + shader->ctx_reg.ngg.pa_cl_ngg_cntl); + + if (initial_cdw != sctx->gfx_cs->current.cdw) + sctx->context_roll = true; + + if (shader->ge_cntl != sctx->last_multi_vgt_param) { + radeon_set_uconfig_reg(sctx->gfx_cs, R_03096C_GE_CNTL, shader->ge_cntl); + sctx->last_multi_vgt_param = shader->ge_cntl; + } +} + +static void gfx10_emit_shader_ngg_notess_nogs(struct si_context *sctx) +{ + struct si_shader *shader = sctx->queued.named.gs->shader; + unsigned initial_cdw = sctx->gfx_cs->current.cdw; + + if (!shader) + return; + + gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw); +} + +static void gfx10_emit_shader_ngg_tess_nogs(struct si_context *sctx) +{ + struct si_shader *shader = 
sctx->queued.named.gs->shader; + unsigned initial_cdw = sctx->gfx_cs->current.cdw; + + if (!shader) + return; + + radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, + SI_TRACKED_VGT_TF_PARAM, + shader->vgt_tf_param); + + gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw); +} + +static void gfx10_emit_shader_ngg_notess_gs(struct si_context *sctx) +{ + struct si_shader *shader = sctx->queued.named.gs->shader; + unsigned initial_cdw = sctx->gfx_cs->current.cdw; + + if (!shader) + return; + + radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT, + SI_TRACKED_VGT_GS_MAX_VERT_OUT, + shader->ctx_reg.ngg.vgt_gs_max_vert_out); + + gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw); +} + +static void gfx10_emit_shader_ngg_tess_gs(struct si_context *sctx) +{ + struct si_shader *shader = sctx->queued.named.gs->shader; + unsigned initial_cdw = sctx->gfx_cs->current.cdw; + + if (!shader) + return; + + radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT, + SI_TRACKED_VGT_GS_MAX_VERT_OUT, + shader->ctx_reg.ngg.vgt_gs_max_vert_out); + radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, + SI_TRACKED_VGT_TF_PARAM, + shader->vgt_tf_param); + + gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw); +} + +unsigned si_get_input_prim(const struct si_shader_selector *gs) +{ + if (gs->type == PIPE_SHADER_GEOMETRY) + return gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]; + + if (gs->type == PIPE_SHADER_TESS_EVAL) { + if (gs->info.properties[TGSI_PROPERTY_TES_POINT_MODE]) + return PIPE_PRIM_POINTS; + if (gs->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES) + return PIPE_PRIM_LINES; + return PIPE_PRIM_TRIANGLES; + } + + /* TODO: Set this correctly if the primitive type is set in the shader key. */ + return PIPE_PRIM_TRIANGLES; +} + +/** + * Prepare the PM4 image for \p shader, which will run as a merged ESGS shader + * in NGG mode. + */ +static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader) +{ + const struct si_shader_selector *gs_sel = shader->selector; + const struct tgsi_shader_info *gs_info = &gs_sel->info; + enum pipe_shader_type gs_type = shader->selector->type; + const struct si_shader_selector *es_sel = + shader->previous_stage_sel ? shader->previous_stage_sel : shader->selector; + const struct tgsi_shader_info *es_info = &es_sel->info; + enum pipe_shader_type es_type = es_sel->type; + unsigned num_user_sgprs; + unsigned nparams, es_vgpr_comp_cnt, gs_vgpr_comp_cnt; + uint64_t va; + unsigned window_space = + gs_info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION]; + bool es_enable_prim_id = shader->key.mono.u.vs_export_prim_id || es_info->uses_primid; + unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1); + unsigned input_prim = si_get_input_prim(gs_sel); + bool break_wave_at_eoi = false; + struct si_pm4_state *pm4 = si_get_shader_pm4_state(shader); + if (!pm4) + return; + + if (es_type == PIPE_SHADER_TESS_EVAL) { + pm4->atom.emit = gs_type == PIPE_SHADER_GEOMETRY ? gfx10_emit_shader_ngg_tess_gs + : gfx10_emit_shader_ngg_tess_nogs; + } else { + pm4->atom.emit = gs_type == PIPE_SHADER_GEOMETRY ? gfx10_emit_shader_ngg_notess_gs + : gfx10_emit_shader_ngg_notess_nogs; + } + + va = shader->bo->gpu_address; + si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY); + + if (es_type == PIPE_SHADER_VERTEX) { + /* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */ + es_vgpr_comp_cnt = shader->info.uses_instanceid ? 
3 : 0; + + if (es_info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) { + num_user_sgprs = SI_SGPR_VS_BLIT_DATA + + es_info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]; + } else { + num_user_sgprs = si_get_num_vs_user_sgprs(GFX9_VSGS_NUM_USER_SGPR); + } + } else { + assert(es_type == PIPE_SHADER_TESS_EVAL); + es_vgpr_comp_cnt = es_enable_prim_id ? 3 : 2; + num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR; + + if (es_enable_prim_id || gs_info->uses_primid) + break_wave_at_eoi = true; + } + + /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and + * VGPR[0:4] are always loaded. + * + * Vertex shaders always need to load VGPR3, because they need to + * pass edge flags for decomposed primitives (such as quads) to the PA + * for the GL_LINE polygon mode to skip rendering lines on inner edges. + */ + if (gs_info->uses_invocationid || gs_type == PIPE_SHADER_VERTEX) + gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID, edge flags. */ + else if (gs_info->uses_primid) + gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */ + else if (input_prim >= PIPE_PRIM_TRIANGLES) + gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */ + else + gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */ + + si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8); + si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40); + si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS, + S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) | + S_00B228_FLOAT_MODE(shader->config.float_mode) | + S_00B228_DX10_CLAMP(1) | + S_00B228_MEM_ORDERED(1) | + S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt)); + si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS, + S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0) | + S_00B22C_USER_SGPR(num_user_sgprs) | + S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) | + S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5) | + S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) | + S_00B22C_LDS_SIZE(shader->config.lds_size)); + + /* TODO: Use NO_PC_EXPORT when applicable. */ + nparams = MAX2(shader->info.nr_param_exports, 1); + shader->ctx_reg.ngg.spi_vs_out_config = + S_0286C4_VS_EXPORT_COUNT(nparams - 1); + + shader->ctx_reg.ngg.spi_shader_idx_format = + S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP); + shader->ctx_reg.ngg.spi_shader_pos_format = + S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) | + S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ? + V_02870C_SPI_SHADER_4COMP : + V_02870C_SPI_SHADER_NONE) | + S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ? + V_02870C_SPI_SHADER_4COMP : + V_02870C_SPI_SHADER_NONE) | + S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ? 
+ V_02870C_SPI_SHADER_4COMP : + V_02870C_SPI_SHADER_NONE); + + shader->ctx_reg.ngg.vgt_primitiveid_en = + S_028A84_PRIMITIVEID_EN(es_enable_prim_id) | + S_028A84_NGG_DISABLE_PROVOK_REUSE(es_enable_prim_id); + + if (gs_type == PIPE_SHADER_GEOMETRY) { + shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = es_sel->esgs_itemsize / 4; + shader->ctx_reg.ngg.vgt_gs_max_vert_out = gs_sel->gs_max_out_vertices; + } else { + shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = 1; + } + + if (es_type == PIPE_SHADER_TESS_EVAL) + si_set_tesseval_regs(sscreen, es_sel, pm4); + + shader->ctx_reg.ngg.vgt_gs_onchip_cntl = + S_028A44_ES_VERTS_PER_SUBGRP(shader->ngg.hw_max_esverts) | + S_028A44_GS_PRIMS_PER_SUBGRP(shader->ngg.max_gsprims) | + S_028A44_GS_INST_PRIMS_IN_SUBGRP(shader->ngg.max_gsprims * gs_num_invocations); + shader->ctx_reg.ngg.ge_max_output_per_subgroup = + S_0287FC_MAX_VERTS_PER_SUBGROUP(shader->ngg.max_out_verts); + shader->ctx_reg.ngg.ge_ngg_subgrp_cntl = + S_028B4C_PRIM_AMP_FACTOR(shader->ngg.prim_amp_factor) | + S_028B4C_THDS_PER_SUBGRP(0); /* for fast launch */ + shader->ctx_reg.ngg.vgt_gs_instance_cnt = + S_028B90_CNT(gs_num_invocations) | + S_028B90_ENABLE(gs_num_invocations > 1) | + S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE( + shader->ngg.max_vert_out_per_gs_instance); + + /* User edge flags are set by the pos exports. If user edge flags are + * not used, we must use hw-generated edge flags and pass them via + * the prim export to prevent drawing lines on internal edges of + * decomposed primitives (such as quads) with polygon mode = lines. + * + * TODO: We should combine hw-generated edge flags with user edge + * flags in the shader. + */ + shader->ctx_reg.ngg.pa_cl_ngg_cntl = + S_028838_INDEX_BUF_EDGE_FLAG_ENA(gs_type == PIPE_SHADER_VERTEX && + !gs_info->writes_edgeflag); + + shader->ge_cntl = + S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) | + S_03096C_VERT_GRP_SIZE(shader->ngg.hw_max_esverts) | + S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi); + + if (window_space) { + shader->ctx_reg.ngg.pa_cl_vte_cntl = + S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1); + } else { + shader->ctx_reg.ngg.pa_cl_vte_cntl = + S_028818_VTX_W0_FMT(1) | + S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) | + S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) | + S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1); + } + + shader->ctx_reg.ngg.vgt_reuse_off = + S_028AB4_REUSE_OFF(sscreen->info.family == CHIP_NAVI10 && + sscreen->info.chip_external_rev == 0x1 && + es_type == PIPE_SHADER_TESS_EVAL); +} + +static void si_emit_shader_vs(struct si_context *sctx) +{ + struct si_shader *shader = sctx->queued.named.vs->shader; + unsigned initial_cdw = sctx->gfx_cs->current.cdw; + + if (!shader) + return; + + radeon_opt_set_context_reg(sctx, R_028A40_VGT_GS_MODE, + SI_TRACKED_VGT_GS_MODE, + shader->ctx_reg.vs.vgt_gs_mode); + radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN, + SI_TRACKED_VGT_PRIMITIVEID_EN, + shader->ctx_reg.vs.vgt_primitiveid_en); + + if (sctx->chip_class <= GFX8) { + radeon_opt_set_context_reg(sctx, R_028AB4_VGT_REUSE_OFF, + SI_TRACKED_VGT_REUSE_OFF, + shader->ctx_reg.vs.vgt_reuse_off); + } + + radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG, + SI_TRACKED_SPI_VS_OUT_CONFIG, + shader->ctx_reg.vs.spi_vs_out_config); + + radeon_opt_set_context_reg(sctx, R_02870C_SPI_SHADER_POS_FORMAT, + SI_TRACKED_SPI_SHADER_POS_FORMAT, + shader->ctx_reg.vs.spi_shader_pos_format); + + radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL, + SI_TRACKED_PA_CL_VTE_CNTL, + 
shader->ctx_reg.vs.pa_cl_vte_cntl); + + if (shader->selector->type == PIPE_SHADER_TESS_EVAL) + radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, + SI_TRACKED_VGT_TF_PARAM, + shader->vgt_tf_param); + + if (shader->vgt_vertex_reuse_block_cntl) + radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, + SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL, + shader->vgt_vertex_reuse_block_cntl); + + if (initial_cdw != sctx->gfx_cs->current.cdw) + sctx->context_roll = true; +} + /** * Compute the state for \p shader, which will run as a vertex shader on the * hardware. @@ -872,18 +1300,19 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader, { const struct tgsi_shader_info *info = &shader->selector->info; struct si_pm4_state *pm4; - unsigned num_user_sgprs; - unsigned nparams, vgpr_comp_cnt; + unsigned num_user_sgprs, vgpr_comp_cnt; uint64_t va; - unsigned oc_lds_en; + unsigned nparams, oc_lds_en; unsigned window_space = - info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION]; + info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION]; bool enable_prim_id = shader->key.mono.u.vs_export_prim_id || info->uses_primid; pm4 = si_get_shader_pm4_state(shader); if (!pm4) return; + pm4->atom.emit = si_emit_shader_vs; + /* We always write VGT_GS_MODE in the VS state, because every switch * between different shader pipelines involving a different GS or no * GS at all involves a switch of the VS (different GS use different @@ -898,19 +1327,18 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader, if (enable_prim_id) mode = V_028A40_GS_SCENARIO_A; - si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, S_028A40_MODE(mode)); - si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id); + shader->ctx_reg.vs.vgt_gs_mode = S_028A40_MODE(mode); + shader->ctx_reg.vs.vgt_primitiveid_en = enable_prim_id; } else { - si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, - ac_vgt_gs_mode(gs->gs_max_out_vertices, - sscreen->info.chip_class)); - si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0); + shader->ctx_reg.vs.vgt_gs_mode = ac_vgt_gs_mode(gs->gs_max_out_vertices, + sscreen->info.chip_class); + shader->ctx_reg.vs.vgt_primitiveid_en = 0; } - if (sscreen->info.chip_class <= VI) { + if (sscreen->info.chip_class <= GFX8) { /* Reuse needs to be set off if we write oViewport. */ - si_pm4_set_reg(pm4, R_028AB4_VGT_REUSE_OFF, - S_028AB4_REUSE_OFF(info->writes_viewport_index)); + shader->ctx_reg.vs.vgt_reuse_off = + S_028AB4_REUSE_OFF(info->writes_viewport_index); } va = shader->bo->gpu_address; @@ -940,49 +1368,55 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader, /* VS is required to export at least one param. */ nparams = MAX2(shader->info.nr_param_exports, 1); - si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG, - S_0286C4_VS_EXPORT_COUNT(nparams - 1)); - - si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT, - S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) | - S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ? - V_02870C_SPI_SHADER_4COMP : - V_02870C_SPI_SHADER_NONE) | - S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ? - V_02870C_SPI_SHADER_4COMP : - V_02870C_SPI_SHADER_NONE) | - S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ? 
- V_02870C_SPI_SHADER_4COMP : - V_02870C_SPI_SHADER_NONE)); + shader->ctx_reg.vs.spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1); + + shader->ctx_reg.vs.spi_shader_pos_format = + S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) | + S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ? + V_02870C_SPI_SHADER_4COMP : + V_02870C_SPI_SHADER_NONE) | + S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ? + V_02870C_SPI_SHADER_4COMP : + V_02870C_SPI_SHADER_NONE) | + S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ? + V_02870C_SPI_SHADER_4COMP : + V_02870C_SPI_SHADER_NONE); oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0; si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8); si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, S_00B124_MEM_BASE(va >> 40)); - si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS, - S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) | - S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8) | - S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) | - S_00B128_DX10_CLAMP(1) | - S_00B128_FLOAT_MODE(shader->config.float_mode)); - si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS, - S_00B12C_USER_SGPR(num_user_sgprs) | - S_00B12C_OC_LDS_EN(oc_lds_en) | - S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) | - S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) | - S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) | - S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) | - S_00B12C_SO_EN(!!shader->selector->so.num_outputs) | - S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0)); + + uint32_t rsrc1 = S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) | + S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) | + S_00B128_DX10_CLAMP(1) | + S_00B128_MEM_ORDERED(sscreen->info.chip_class >= GFX10) | + S_00B128_FLOAT_MODE(shader->config.float_mode); + uint32_t rsrc2 = S_00B12C_USER_SGPR(num_user_sgprs) | + S_00B12C_OC_LDS_EN(oc_lds_en) | + S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0); + + if (sscreen->info.chip_class <= GFX9) { + rsrc1 |= S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8); + rsrc2 |= S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) | + S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) | + S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) | + S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) | + S_00B12C_SO_EN(!!shader->selector->so.num_outputs); + } + + si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS, rsrc1); + si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS, rsrc2); + if (window_space) - si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL, - S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1)); + shader->ctx_reg.vs.pa_cl_vte_cntl = + S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1); else - si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL, - S_028818_VTX_W0_FMT(1) | - S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) | - S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) | - S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1)); + shader->ctx_reg.vs.pa_cl_vte_cntl = + S_028818_VTX_W0_FMT(1) | + S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) | + S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) | + S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1); if (shader->selector->type == PIPE_SHADER_TESS_EVAL) si_set_tesseval_regs(sscreen, shader->selector, pm4); @@ -1017,7 +1451,42 @@ static unsigned si_get_spi_shader_col_format(struct si_shader *shader) return value; } -static void si_shader_ps(struct 
si_shader *shader)
+static void si_emit_shader_ps(struct si_context *sctx)
+{
+	struct si_shader *shader = sctx->queued.named.ps->shader;
+	unsigned initial_cdw = sctx->gfx_cs->current.cdw;
+
+	if (!shader)
+		return;
+
+	/* R_0286CC_SPI_PS_INPUT_ENA, R_0286D0_SPI_PS_INPUT_ADDR */
+	radeon_opt_set_context_reg2(sctx, R_0286CC_SPI_PS_INPUT_ENA,
+				    SI_TRACKED_SPI_PS_INPUT_ENA,
+				    shader->ctx_reg.ps.spi_ps_input_ena,
+				    shader->ctx_reg.ps.spi_ps_input_addr);
+
+	radeon_opt_set_context_reg(sctx, R_0286E0_SPI_BARYC_CNTL,
+				   SI_TRACKED_SPI_BARYC_CNTL,
+				   shader->ctx_reg.ps.spi_baryc_cntl);
+	radeon_opt_set_context_reg(sctx, R_0286D8_SPI_PS_IN_CONTROL,
+				   SI_TRACKED_SPI_PS_IN_CONTROL,
+				   shader->ctx_reg.ps.spi_ps_in_control);
+
+	/* R_028710_SPI_SHADER_Z_FORMAT, R_028714_SPI_SHADER_COL_FORMAT */
+	radeon_opt_set_context_reg2(sctx, R_028710_SPI_SHADER_Z_FORMAT,
+				    SI_TRACKED_SPI_SHADER_Z_FORMAT,
+				    shader->ctx_reg.ps.spi_shader_z_format,
+				    shader->ctx_reg.ps.spi_shader_col_format);
+
+	radeon_opt_set_context_reg(sctx, R_02823C_CB_SHADER_MASK,
+				   SI_TRACKED_CB_SHADER_MASK,
+				   shader->ctx_reg.ps.cb_shader_mask);
+
+	if (initial_cdw != sctx->gfx_cs->current.cdw)
+		sctx->context_roll = true;
+}
+
+static void si_shader_ps(struct si_screen *sscreen, struct si_shader *shader)
 {
 	struct tgsi_shader_info *info = &shader->selector->info;
 	struct si_pm4_state *pm4;
@@ -1074,6 +1543,8 @@ static void si_shader_ps(struct si_shader *shader)
 	if (!pm4)
 		return;
 
+	pm4->atom.emit = si_emit_shader_ps;
+
 	/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
 	 * Possible values:
 	 * 0 -> Position = pixel center
@@ -1116,35 +1587,37 @@ static void si_shader_ps(struct si_shader *shader)
 	    !info->writes_z && !info->writes_stencil && !info->writes_samplemask)
 		spi_shader_col_format = V_028714_SPI_SHADER_32_R;
 
-	si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, input_ena);
-	si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR,
-		       shader->config.spi_ps_input_addr);
+	shader->ctx_reg.ps.spi_ps_input_ena = input_ena;
+	shader->ctx_reg.ps.spi_ps_input_addr = shader->config.spi_ps_input_addr;
 
 	/* Set interpolation controls. */
 	spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader));
 
-	/* Set registers. 
*/ - si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl); - si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control); - - si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, - ac_get_spi_shader_z_format(info->writes_z, - info->writes_stencil, - info->writes_samplemask)); - - si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT, spi_shader_col_format); - si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, cb_shader_mask); + shader->ctx_reg.ps.spi_baryc_cntl = spi_baryc_cntl; + shader->ctx_reg.ps.spi_ps_in_control = spi_ps_in_control; + shader->ctx_reg.ps.spi_shader_z_format = + ac_get_spi_shader_z_format(info->writes_z, + info->writes_stencil, + info->writes_samplemask); + shader->ctx_reg.ps.spi_shader_col_format = spi_shader_col_format; + shader->ctx_reg.ps.cb_shader_mask = cb_shader_mask; va = shader->bo->gpu_address; si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY); si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8); si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, S_00B024_MEM_BASE(va >> 40)); - si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS, - S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) | - S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8) | - S_00B028_DX10_CLAMP(1) | - S_00B028_FLOAT_MODE(shader->config.float_mode)); + uint32_t rsrc1 = + S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) | + S_00B028_DX10_CLAMP(1) | + S_00B028_MEM_ORDERED(sscreen->info.chip_class >= GFX10) | + S_00B028_FLOAT_MODE(shader->config.float_mode); + + if (sscreen->info.chip_class < GFX10) { + rsrc1 |= S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8); + } + + si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS, rsrc1); si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS, S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) | S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) | @@ -1160,6 +1633,8 @@ static void si_shader_init_pm4_state(struct si_screen *sscreen, si_shader_ls(sscreen, shader); else if (shader->key.as_es) si_shader_es(sscreen, shader); + else if (shader->key.as_ngg) + gfx10_shader_ngg(sscreen, shader); else si_shader_vs(sscreen, shader, NULL); break; @@ -1169,14 +1644,19 @@ static void si_shader_init_pm4_state(struct si_screen *sscreen, case PIPE_SHADER_TESS_EVAL: if (shader->key.as_es) si_shader_es(sscreen, shader); + else if (shader->key.as_ngg) + gfx10_shader_ngg(sscreen, shader); else si_shader_vs(sscreen, shader, NULL); break; case PIPE_SHADER_GEOMETRY: - si_shader_gs(sscreen, shader); + if (shader->key.as_ngg) + gfx10_shader_ngg(sscreen, shader); + else + si_shader_gs(sscreen, shader); break; case PIPE_SHADER_FRAGMENT: - si_shader_ps(shader); + si_shader_ps(sscreen, shader); break; default: assert(0); @@ -1192,27 +1672,53 @@ static unsigned si_get_alpha_test_func(struct si_context *sctx) return PIPE_FUNC_ALWAYS; } -static void si_shader_selector_key_vs(struct si_context *sctx, - struct si_shader_selector *vs, - struct si_shader_key *key, - struct si_vs_prolog_bits *prolog_key) +void si_shader_selector_key_vs(struct si_context *sctx, + struct si_shader_selector *vs, + struct si_shader_key *key, + struct si_vs_prolog_bits *prolog_key) { - if (!sctx->vertex_elements) + if (!sctx->vertex_elements || + vs->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) return; - prolog_key->instance_divisor_is_one = - sctx->vertex_elements->instance_divisor_is_one; - prolog_key->instance_divisor_is_fetched = - sctx->vertex_elements->instance_divisor_is_fetched; + struct si_vertex_elements *elts = sctx->vertex_elements; + + prolog_key->instance_divisor_is_one = 
elts->instance_divisor_is_one; + prolog_key->instance_divisor_is_fetched = elts->instance_divisor_is_fetched; + prolog_key->unpack_instance_id_from_vertex_id = + sctx->prim_discard_cs_instancing; /* Prefer a monolithic shader to allow scheduling divisions around * VBO loads. */ if (prolog_key->instance_divisor_is_fetched) key->opt.prefer_mono = 1; - unsigned count = MIN2(vs->info.num_inputs, - sctx->vertex_elements->count); - memcpy(key->mono.vs_fix_fetch, sctx->vertex_elements->fix_fetch, count); + unsigned count = MIN2(vs->info.num_inputs, elts->count); + unsigned count_mask = (1 << count) - 1; + unsigned fix = elts->fix_fetch_always & count_mask; + unsigned opencode = elts->fix_fetch_opencode & count_mask; + + if (sctx->vertex_buffer_unaligned & elts->vb_alignment_check_mask) { + uint32_t mask = elts->fix_fetch_unaligned & count_mask; + while (mask) { + unsigned i = u_bit_scan(&mask); + unsigned log_hw_load_size = 1 + ((elts->hw_load_is_dword >> i) & 1); + unsigned vbidx = elts->vertex_buffer_index[i]; + struct pipe_vertex_buffer *vb = &sctx->vertex_buffer[vbidx]; + unsigned align_mask = (1 << log_hw_load_size) - 1; + if (vb->buffer_offset & align_mask || + vb->stride & align_mask) { + fix |= 1 << i; + opencode |= 1 << i; + } + } + } + + while (fix) { + unsigned i = u_bit_scan(&fix); + key->mono.vs_fix_fetch[i].bits = elts->fix_fetch[i]; + } + key->mono.vs_fetch_opencode = opencode; } static void si_shader_selector_key_hw_vs(struct si_context *sctx, @@ -1267,6 +1773,7 @@ static void si_shader_selector_key_hw_vs(struct si_context *sctx, /* Compute the key for the hw shader variant */ static inline void si_shader_selector_key(struct pipe_context *ctx, struct si_shader_selector *sel, + union si_vgt_stages_key stages_key, struct si_shader_key *key) { struct si_context *sctx = (struct si_context *)ctx; @@ -1282,6 +1789,7 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, else if (sctx->gs_shader.cso) key->as_es = 1; else { + key->as_ngg = stages_key.u.ngg; si_shader_selector_key_hw_vs(sctx, sel, key); if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid) @@ -1323,6 +1831,7 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, if (sctx->gs_shader.cso) key->as_es = 1; else { + key->as_ngg = stages_key.u.ngg; si_shader_selector_key_hw_vs(sctx, sel, key); if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid) @@ -1340,6 +1849,8 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, key->part.gs.prolog.gfx9_prev_is_vs = 1; } + key->as_ngg = stages_key.u.ngg; + /* Merged ES-GS can have unbalanced wave usage. * * ES threads are per-vertex, while GS threads are @@ -1400,11 +1911,11 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, blend && blend->alpha_to_coverage) key->part.ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR; - /* On SI and CIK except Hawaii, the CB doesn't clamp outputs + /* On GFX6 and GFX7 except Hawaii, the CB doesn't clamp outputs * to the range supported by the type if a channel has less * than 16 bits and the export format is 16_ABGR. 
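		 * In that case the color_is_int8/color_is_int10 key bits set
		 * below make the PS epilog clamp those channels in the shader
		 * itself.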
*/ - if (sctx->chip_class <= CIK && sctx->family != CHIP_HAWAII) { + if (sctx->chip_class <= GFX7 && sctx->family != CHIP_HAWAII) { key->part.ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8; key->part.ps.epilog.color_is_int10 = sctx->framebuffer.color_is_int10; } @@ -1478,7 +1989,7 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx); /* ps_uses_fbfetch is true only if the color buffer is bound. */ - if (sctx->ps_uses_fbfetch) { + if (sctx->ps_uses_fbfetch && !sctx->blitter->running) { struct pipe_surface *cb0 = sctx->framebuffer.state.cbufs[0]; struct pipe_resource *tex = cb0->texture; @@ -1511,7 +2022,6 @@ static void si_build_shader_variant(struct si_shader *shader, struct si_screen *sscreen = sel->screen; struct ac_llvm_compiler *compiler; struct pipe_debug_callback *debug = &shader->compiler_ctx_state.debug; - int r; if (thread_index >= 0) { if (low_priority) { @@ -1528,10 +2038,9 @@ static void si_build_shader_variant(struct si_shader *shader, compiler = shader->compiler_ctx_state.compiler; } - r = si_shader_create(sscreen, compiler, shader, debug); - if (unlikely(r)) { - PRINT_ERR("Failed to build shader variant (type=%u) %d\n", - sel->type, r); + if (unlikely(!si_shader_create(sscreen, compiler, shader, debug))) { + PRINT_ERR("Failed to build shader variant (type=%u)\n", + sel->type); shader->compilation_failed = true; return; } @@ -1540,7 +2049,7 @@ static void si_build_shader_variant(struct si_shader *shader, FILE *f = open_memstream(&shader->shader_log, &shader->shader_log_size); if (f) { - si_shader_dump(sscreen, shader, NULL, sel->type, f, false); + si_shader_dump(sscreen, shader, NULL, f, false); fclose(f); } } @@ -1580,6 +2089,7 @@ static bool si_check_missing_main_part(struct si_screen *sscreen, main_part->selector = sel; main_part->key.as_es = key->as_es; main_part->key.as_ls = key->as_ls; + main_part->key.as_ngg = key->as_ngg; main_part->is_monolithic = false; if (si_compile_tgsi_shader(sscreen, compiler_state->compiler, @@ -1592,12 +2102,19 @@ static bool si_check_missing_main_part(struct si_screen *sscreen, return true; } -/* Select the hw shader variant depending on the current state. */ -static int si_shader_select_with_key(struct si_screen *sscreen, - struct si_shader_ctx_state *state, - struct si_compiler_ctx_state *compiler_state, - struct si_shader_key *key, - int thread_index) +/** + * Select a shader variant according to the shader key. + * + * \param optimized_or_none If the key describes an optimized shader variant and + * the compilation isn't finished, don't select any + * shader and return an error. + */ +int si_shader_select_with_key(struct si_screen *sscreen, + struct si_shader_ctx_state *state, + struct si_compiler_ctx_state *compiler_state, + struct si_shader_key *key, + int thread_index, + bool optimized_or_none) { struct si_shader_selector *sel = state->cso; struct si_shader_selector *previous_stage_sel = NULL; @@ -1613,6 +2130,9 @@ again: memcmp(¤t->key, key, sizeof(*key)) == 0)) { if (unlikely(!util_queue_fence_is_signalled(¤t->ready))) { if (current->is_optimized) { + if (optimized_or_none) + return -1; + memset(&key->opt, 0, sizeof(key->opt)); goto current_not_ready; } @@ -1649,6 +2169,8 @@ current_not_ready: * shader so as not to cause a stall due to compilation. 
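			 * Clearing key->opt below selects the default
			 * (unoptimized) variant instead, which is used until
			 * the optimized one is ready.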
*/ if (iter->is_optimized) { + if (optimized_or_none) + return -1; memset(&key->opt, 0, sizeof(key->opt)); goto again; } @@ -1690,19 +2212,26 @@ current_not_ready: util_queue_fence_wait(&previous_stage_sel->ready); } - /* Compile the main shader part if it doesn't exist. This can happen - * if the initial guess was wrong. */ bool is_pure_monolithic = sscreen->use_monolithic_shaders || memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0; - if (!is_pure_monolithic) { - bool ok; + /* Compile the main shader part if it doesn't exist. This can happen + * if the initial guess was wrong. + * + * The prim discard CS doesn't need the main shader part. + */ + if (!is_pure_monolithic && + !key->opt.vs_as_prim_discard_cs) { + bool ok = true; /* Make sure the main shader part is present. This is needed * for shaders that can be compiled as VS, LS, or ES, and only * one of them is compiled at creation. * + * It is also needed for GS, which can be compiled as non-NGG + * and NGG. + * * For merged shaders, check that the starting shader's main * part is present. */ @@ -1721,10 +2250,13 @@ current_not_ready: previous_stage_sel, compiler_state, &shader1_key); mtx_unlock(&previous_stage_sel->mutex); - } else { + } + + if (ok) { ok = si_check_missing_main_part(sscreen, sel, compiler_state, key); } + if (!ok) { FREE(shader); mtx_unlock(&sel->mutex); @@ -1747,14 +2279,13 @@ current_not_ready: is_pure_monolithic || memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0; + /* The prim discard CS is always optimized. */ shader->is_optimized = - !is_pure_monolithic && - memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0; + (!is_pure_monolithic || key->opt.vs_as_prim_discard_cs) && + memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0; /* If it's an optimized shader, compile it asynchronously. */ - if (shader->is_optimized && - !is_pure_monolithic && - thread_index < 0) { + if (shader->is_optimized && thread_index < 0) { /* Compile it asynchronously. */ util_queue_add_job(&sscreen->shader_compiler_queue_low_priority, shader, &shader->ready, @@ -1773,6 +2304,12 @@ current_not_ready: /* Use the default (unoptimized) shader for now. */ memset(&key->opt, 0, sizeof(key->opt)); mtx_unlock(&sel->mutex); + + if (sscreen->options.sync_compile) + util_queue_fence_wait(&shader->ready); + + if (optimized_or_none) + return -1; goto again; } @@ -1802,14 +2339,15 @@ current_not_ready: static int si_shader_select(struct pipe_context *ctx, struct si_shader_ctx_state *state, + union si_vgt_stages_key stages_key, struct si_compiler_ctx_state *compiler_state) { struct si_context *sctx = (struct si_context *)ctx; struct si_shader_key key; - si_shader_selector_key(ctx, state->cso, &key); + si_shader_selector_key(ctx, state->cso, stages_key, &key); return si_shader_select_with_key(sctx->screen, state, compiler_state, - &key, -1); + &key, -1, false); } static void si_parse_next_shader_property(const struct tgsi_shader_info *info, @@ -1864,6 +2402,9 @@ static void si_init_shader_selector_async(void *job, int thread_index) assert(thread_index < ARRAY_SIZE(sscreen->compiler)); compiler = &sscreen->compiler[thread_index]; + if (sel->nir) + si_lower_nir(sel); + /* Compile the main shader part for use with a prolog and/or epilog. * If this fails, the driver will try to compile a monolithic shader * on demand. 
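/* An outline of the variant selection implemented by
 * si_shader_select_with_key() above (a simplified sketch; the helper
 * names here are illustrative, and locking and ready-fences are elided):
 *
 *	compute key from current state;
 * again:
 *	if ('current' or a listed variant matches the key)
 *		use it (if an optimized match is still compiling, clear
 *		key->opt and retry, so drawing never stalls on it);
 *	else if (key->mono bits are set or monolithic shaders are forced)
 *		compile a monolithic variant;
 *	else
 *		link prolog + main shader part + epilog;
 */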
@@ -1886,6 +2427,13 @@ static void si_init_shader_selector_async(void *job, int thread_index) si_parse_next_shader_property(&sel->info, sel->so.num_outputs != 0, &shader->key); + if (sscreen->info.chip_class >= GFX10 && + !sscreen->options.disable_ngg && + (((sel->type == PIPE_SHADER_VERTEX || + sel->type == PIPE_SHADER_TESS_EVAL) && + !shader->key.as_ls && !shader->key.as_es) || + sel->type == PIPE_SHADER_GEOMETRY)) + shader->key.as_ngg = 1; if (sel->tokens || sel->nir) ir_binary = si_get_ir_binary(sel); @@ -1896,7 +2444,7 @@ static void si_init_shader_selector_async(void *job, int thread_index) if (ir_binary && si_shader_cache_load_shader(sscreen, ir_binary, shader)) { mtx_unlock(&sscreen->shader_cache_mutex); - si_shader_dump_stats_for_shader_db(shader, debug); + si_shader_dump_stats_for_shader_db(sscreen, shader, debug); } else { mtx_unlock(&sscreen->shader_cache_mutex); @@ -1962,7 +2510,11 @@ static void si_init_shader_selector_async(void *job, int thread_index) } } - /* The GS copy shader is always pre-compiled. */ + /* The GS copy shader is always pre-compiled. + * + * TODO-GFX10: We could compile the GS copy shader on demand, since it + * is only used in the (rare) non-NGG case. + */ if (sel->type == PIPE_SHADER_GEOMETRY) { sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, compiler, sel, debug); if (!sel->gs_copy_shader) { @@ -1982,12 +2534,12 @@ void si_schedule_initial_compile(struct si_context *sctx, unsigned processor, util_queue_fence_init(ready_fence); struct util_async_debug_callback async_debug; - bool wait = + bool debug = (sctx->debug.debug_message && !sctx->debug.async) || sctx->is_debug || si_can_dump_shader(sctx->screen, processor); - if (wait) { + if (debug) { u_async_debug_init(&async_debug); compiler_ctx_state->debug = async_debug.base; } @@ -1995,11 +2547,14 @@ void si_schedule_initial_compile(struct si_context *sctx, unsigned processor, util_queue_add_job(&sctx->screen->shader_compiler_queue, job, ready_fence, execute, NULL); - if (wait) { + if (debug) { util_queue_fence_wait(ready_fence); u_async_debug_drain(&async_debug, &sctx->debug); u_async_debug_cleanup(&async_debug); } + + if (sctx->screen->options.sync_compile) + util_queue_fence_wait(ready_fence); } /* Return descriptor slot usage masks from the given shader info. */ @@ -2058,10 +2613,9 @@ static void *si_create_shader_selector(struct pipe_context *ctx, sel->nir = state->ir.nir; + si_nir_opts(sel->nir); si_nir_scan_shader(sel->nir, &sel->info); - si_nir_scan_tess_ctrl(sel->nir, &sel->info, &sel->tcs_info); - - si_lower_nir(sel); + si_nir_scan_tess_ctrl(sel->nir, &sel->tcs_info); } sel->type = sel->info.processor; @@ -2088,6 +2642,15 @@ static void *si_create_shader_selector(struct pipe_context *ctx, sel->info.uses_kill && sctx->screen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL); + sel->prim_discard_cs_allowed = + sel->type == PIPE_SHADER_VERTEX && + !sel->info.uses_bindless_images && + !sel->info.uses_bindless_samplers && + !sel->info.writes_memory && + !sel->info.writes_viewport_index && + !sel->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] && + !sel->so.num_outputs; + /* Set which opcode uses which (i,j) pair. 
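	 * (E.g. an explicit interpolate-at-centroid opcode implies the
	 * corresponding centroid (i,j) pair must be computed, which is
	 * what the flags set below record.)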
@@ -2107,6 +2670,12 @@
 	case PIPE_SHADER_GEOMETRY:
 		sel->gs_output_prim =
 			sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
+
+		/* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */
+		sel->rast_prim = sel->gs_output_prim;
+		if (util_rast_prim_is_triangles(sel->rast_prim))
+			sel->rast_prim = PIPE_PRIM_TRIANGLES;
+
 		sel->gs_max_out_vertices =
 			sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
 		sel->gs_num_invocations =
@@ -2175,6 +2744,14 @@
 			sel->esgs_itemsize += 4;
 
 		assert(((sel->esgs_itemsize / 4) & C_028AAC_ITEMSIZE) == 0);
+
+		/* Only for TES: */
+		if (sel->info.properties[TGSI_PROPERTY_TES_POINT_MODE])
+			sel->rast_prim = PIPE_PRIM_POINTS;
+		else if (sel->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
+			sel->rast_prim = PIPE_PRIM_LINE_STRIP;
+		else
+			sel->rast_prim = PIPE_PRIM_TRIANGLES;
 		break;
 
 	case PIPE_SHADER_FRAGMENT:
@@ -2208,6 +2785,7 @@
 			}
 		}
 		break;
+	default:;
 	}
 
 	/* PA_CL_VS_OUT_CNTL */
@@ -2368,6 +2946,27 @@ static void si_update_tess_uses_prim_id(struct si_context *sctx)
 			 sctx->ps_shader.cso->info.uses_primid);
 }
 
+static bool si_update_ngg(struct si_context *sctx)
+{
+	if (sctx->chip_class <= GFX9 ||
+	    sctx->screen->options.disable_ngg)
+		return false;
+
+	bool new_ngg = true;
+
+	/* EN_MAX_VERT_OUT_PER_GS_INSTANCE does not work with tessellation. */
+	if (sctx->gs_shader.cso && sctx->tes_shader.cso &&
+	    sctx->gs_shader.cso->gs_num_invocations * sctx->gs_shader.cso->gs_max_out_vertices > 256)
+		new_ngg = false;
+
+	if (new_ngg != sctx->ngg) {
+		sctx->ngg = new_ngg;
+		sctx->last_rast_prim = -1; /* reset this so that it gets updated */
+		return true;
+	}
+	return false;
+}
+
 static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
 {
 	struct si_context *sctx = (struct si_context *)ctx;
@@ -2375,6 +2974,7 @@ static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
 	struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
 	struct si_shader_selector *sel = state;
 	bool enable_changed = !!sctx->gs_shader.cso != !!sel;
+	bool ngg_changed;
 
 	if (sctx->gs_shader.cso == sel)
 		return;
@@ -2386,8 +2986,10 @@ static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
 	si_update_common_shader_state(sctx);
 	sctx->last_rast_prim = -1; /* reset this so that it gets updated */
 
-	if (enable_changed) {
+	ngg_changed = si_update_ngg(sctx);
+	if (ngg_changed || enable_changed)
 		si_shader_change_notify(sctx);
+	if (enable_changed) {
 		if (sctx->ia_multi_vgt_param_key.u.uses_tess)
 			si_update_tess_uses_prim_id(sctx);
 	}
@@ -2439,6 +3041,7 @@ static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
 	sctx->last_rast_prim = -1; /* reset this so that it gets updated */
 
 	if (enable_changed) {
+		si_update_ngg(sctx);
 		si_shader_change_notify(sctx);
 		sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
 	}
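si_update_ngg above encodes one hardware restriction worth spelling out: with tessellation present, NGG cannot use EN_MAX_VERT_OUT_PER_GS_INSTANCE, so a GS whose invocation count times max output vertices exceeds 256 forces the legacy path. For example, gs_num_invocations = 2 with gs_max_out_vertices = 192 gives 384 > 256 and disables NGG, while 2 * 128 = 256 stays on NGG. A sketch of just that rule (names hypothetical):

#include <stdbool.h>

/* Decide the NGG toggle the way si_update_ngg above does. */
static bool toy_use_ngg(bool has_gs, bool has_tess,
			unsigned gs_num_invocations,
			unsigned gs_max_out_vertices)
{
	/* EN_MAX_VERT_OUT_PER_GS_INSTANCE is unavailable with
	 * tessellation, so a large GS output must fall back. */
	if (has_gs && has_tess &&
	    gs_num_invocations * gs_max_out_vertices > 256)
		return false;
	return true;
}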
@@ -2492,14 +3095,21 @@ static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
 	util_queue_fence_destroy(&shader->ready);
 
 	if (shader->pm4) {
+		/* If destroyed shaders were not unbound, the next compiled
+		 * shader variant could get the same pointer address and so
+		 * binding it to the same shader stage would be considered
+		 * a no-op, causing random behavior.
+		 */
 		switch (shader->selector->type) {
 		case PIPE_SHADER_VERTEX:
 			if (shader->key.as_ls) {
-				assert(sctx->chip_class <= VI);
+				assert(sctx->chip_class <= GFX8);
 				si_pm4_delete_state(sctx, ls, shader->pm4);
 			} else if (shader->key.as_es) {
-				assert(sctx->chip_class <= VI);
+				assert(sctx->chip_class <= GFX8);
 				si_pm4_delete_state(sctx, es, shader->pm4);
+			} else if (shader->key.as_ngg) {
+				si_pm4_delete_state(sctx, gs, shader->pm4);
 			} else {
 				si_pm4_delete_state(sctx, vs, shader->pm4);
 			}
@@ -2509,8 +3119,10 @@ static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
 			break;
 		case PIPE_SHADER_TESS_EVAL:
 			if (shader->key.as_es) {
-				assert(sctx->chip_class <= VI);
+				assert(sctx->chip_class <= GFX8);
 				si_pm4_delete_state(sctx, es, shader->pm4);
+			} else if (shader->key.as_ngg) {
+				si_pm4_delete_state(sctx, gs, shader->pm4);
 			} else {
 				si_pm4_delete_state(sctx, vs, shader->pm4);
 			}
@@ -2524,6 +3136,7 @@ static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
 		case PIPE_SHADER_FRAGMENT:
 			si_pm4_delete_state(sctx, ps, shader->pm4);
 			break;
+		default:;
 		}
 	}
 
@@ -2563,6 +3176,8 @@ void si_destroy_shader_selector(struct si_context *sctx,
 		si_delete_shader(sctx, sel->main_shader_part_ls);
 	if (sel->main_shader_part_es)
 		si_delete_shader(sctx, sel->main_shader_part_es);
+	if (sel->main_shader_part_ngg)
+		si_delete_shader(sctx, sel->main_shader_part_ngg);
 	if (sel->gs_copy_shader)
 		si_delete_shader(sctx, sel->gs_copy_shader);
 
@@ -2589,7 +3204,8 @@ static unsigned si_get_ps_input_cntl(struct si_context *sctx,
 	unsigned j, offset, ps_input_cntl = 0;
 
 	if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
-	    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
+	    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade) ||
+	    name == TGSI_SEMANTIC_PRIMID)
 		ps_input_cntl |= S_028644_FLAT_SHADE(1);
 
 	if (name == TGSI_SEMANTIC_PCOORD ||
@@ -2624,8 +3240,8 @@ static unsigned si_get_ps_input_cntl(struct si_context *sctx,
 		}
 	}
 
-	if (name == TGSI_SEMANTIC_PRIMID)
-		/* PrimID is written after the last output. */
+	if (j == vsinfo->num_outputs && name == TGSI_SEMANTIC_PRIMID)
+		/* PrimID is written after the last output when HW VS is used. */
 		ps_input_cntl |= S_028644_OFFSET(vs->info.vs_output_param_offset[vsinfo->num_outputs]);
 	else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
 		/* No corresponding output found, load defaults into input.
@@ -2684,9 +3300,13 @@ static void si_emit_spi_map(struct si_context *sctx)
 	/* R_028644_SPI_PS_INPUT_CNTL_0 */
 	/* Dota 2: Only ~16% of SPI map updates set different values. */
 	/* Talos: Only ~9% of SPI map updates set different values. */
+	unsigned initial_cdw = sctx->gfx_cs->current.cdw;
 	radeon_opt_set_context_regn(sctx, R_028644_SPI_PS_INPUT_CNTL_0,
 				    spi_ps_input_cntl,
 				    sctx->tracked_regs.spi_ps_input_cntl, num_interp);
+
+	if (initial_cdw != sctx->gfx_cs->current.cdw)
+		sctx->context_roll = true;
 }
 
 /**
@@ -2722,10 +3342,10 @@ static bool si_update_gs_ring_buffers(struct si_context *sctx)
 	unsigned num_se = sctx->screen->info.max_se;
 	unsigned wave_size = 64;
 	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
-	/* On SI-CI, the value comes from VGT_GS_VERTEX_REUSE = 16.
-	 * On VI+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
+	/* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
+	 * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
 	 */
-	unsigned gs_vertex_reuse = (sctx->chip_class >= VI ? 32 : 16) * num_se;
+	unsigned gs_vertex_reuse = (sctx->chip_class >= GFX8 ? 32 : 16) * num_se;
 	unsigned alignment = 256 * num_se;
 	/* The maximum size is 63.999 MB per SE. */
 	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
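The constants in the ring-sizing hunk above translate into concrete numbers quickly. Assuming a 4-SE part (an assumption for illustration; max_se really comes from the screen info), a worked example:

#include <stdio.h>

int main(void)
{
	unsigned num_se = 4;       /* hypothetical; read from screen info */
	unsigned gfx8_plus = 1;

	unsigned max_gs_waves = 32 * num_se;                       /* 128 */
	unsigned gs_vertex_reuse = (gfx8_plus ? 32 : 16) * num_se; /* 128 */
	unsigned alignment = 256 * num_se;                         /* 1024 */
	/* 63.999 MB per SE, rounded down to 256 bytes, times num_se. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255u) * num_se;

	printf("waves=%u reuse=%u align=%u max=%u\n",
	       max_gs_waves, gs_vertex_reuse, alignment, max_size);
	return 0;
}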
@@ -2752,7 +3372,7 @@
 	 *
 	 * GFX9 doesn't have the ESGS ring.
 	 */
-	bool update_esgs = sctx->chip_class <= VI &&
+	bool update_esgs = sctx->chip_class <= GFX8 &&
 			   esgs_ring_size &&
 			   (!sctx->esgs_ring ||
 			    sctx->esgs_ring->width0 < esgs_ring_size);
@@ -2790,9 +3410,9 @@
 	if (!pm4)
 		return false;
 
-	if (sctx->chip_class >= CIK) {
+	if (sctx->chip_class >= GFX7) {
 		if (sctx->esgs_ring) {
-			assert(sctx->chip_class <= VI);
+			assert(sctx->chip_class <= GFX8);
 			si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
 				       sctx->esgs_ring->width0 / 256);
 		}
@@ -2824,7 +3444,7 @@
 
 	/* Set ring bindings. */
 	if (sctx->esgs_ring) {
-		assert(sctx->chip_class <= VI);
+		assert(sctx->chip_class <= GFX8);
 		si_set_ring_buffer(sctx, SI_ES_RING_ESGS,
 				   sctx->esgs_ring, 0, sctx->esgs_ring->width0,
 				   true, true, 4, 64, 0);
@@ -2866,7 +3486,6 @@ static int si_update_scratch_buffer(struct si_context *sctx,
 				    struct si_shader *shader)
 {
 	uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
-	int r;
 
 	if (!shader)
 		return 0;
@@ -2891,22 +3510,16 @@ static int si_update_scratch_buffer(struct si_context *sctx,
 
 	assert(sctx->scratch_buffer);
 
-	if (shader->previous_stage)
-		si_shader_apply_scratch_relocs(shader->previous_stage, scratch_va);
-
-	si_shader_apply_scratch_relocs(shader, scratch_va);
-
 	/* Replace the shader bo with a new bo that has the relocs applied. */
-	r = si_shader_binary_upload(sctx->screen, shader);
-	if (r) {
+	if (!si_shader_binary_upload(sctx->screen, shader, scratch_va)) {
 		si_shader_unlock(shader);
-		return r;
+		return -1;
 	}
 
 	/* Update the shader state to use the new shader bo. */
 	si_shader_init_pm4_state(sctx->screen, shader);
 
-	r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
+	si_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
 
 	si_shader_unlock(shader);
 	return 1;
}
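si_update_scratch_buffer above returns a small tri-state code that its callers branch on; making the convention explicit may help. The enum names below are hypothetical, but the values are the ones used in the hunk:

/* Return-code convention of the scratch update helper above. */
enum toy_scratch_result {
	TOY_SCRATCH_FAIL      = -1, /* re-upload failed; abort the update */
	TOY_SCRATCH_UNCHANGED =  0, /* shader missing or already up to date */
	TOY_SCRATCH_UPDATED   =  1, /* new bo uploaded; caller must rebind pm4 */
};

/* Caller pattern, as in si_update_scratch_relocs below:
 *
 *	int r = toy_update_scratch(shader);
 *	if (r < 0)
 *		return false;
 *	if (r == 1)
 *		rebind_pm4_state_for(shader);
 */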
@@ -2981,10 +3594,12 @@
 	if (r < 0)
 		return false;
 	if (r == 1) {
-		if (sctx->tes_shader.current)
+		if (sctx->vs_shader.current->key.as_ls)
 			si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
-		else if (sctx->gs_shader.current)
+		else if (sctx->vs_shader.current->key.as_es)
 			si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
+		else if (sctx->vs_shader.current->key.as_ngg)
+			si_pm4_bind_state(sctx, gs, sctx->vs_shader.current->pm4);
 		else
 			si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
 	}
@@ -2994,8 +3609,10 @@
 	if (r < 0)
 		return false;
 	if (r == 1) {
-		if (sctx->gs_shader.current)
+		if (sctx->tes_shader.current->key.as_es)
 			si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
+		else if (sctx->tes_shader.current->key.as_ngg)
+			si_pm4_bind_state(sctx, gs, sctx->tes_shader.current->pm4);
 		else
 			si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
 	}
@@ -3016,7 +3633,7 @@
 	if (scratch_needed_size > 0) {
 		if (scratch_needed_size > current_scratch_buffer_size) {
 			/* Create a bigger scratch buffer */
-			r600_resource_reference(&sctx->scratch_buffer, NULL);
+			si_resource_reference(&sctx->scratch_buffer, NULL);
 
 			sctx->scratch_buffer =
 				si_aligned_buffer_create(&sctx->screen->b,
@@ -3066,19 +3683,22 @@ static void si_init_tess_factor_ring(struct si_context *sctx)
 
 	si_init_config_add_vgt_flush(sctx);
 
-	si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tess_rings),
+	si_pm4_add_bo(sctx->init_config, si_resource(sctx->tess_rings),
 		      RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
 
-	uint64_t factor_va = r600_resource(sctx->tess_rings)->gpu_address +
+	uint64_t factor_va = si_resource(sctx->tess_rings)->gpu_address +
 			     sctx->screen->tess_offchip_ring_size;
 
 	/* Append these registers to the init config state. */
-	if (sctx->chip_class >= CIK) {
+	if (sctx->chip_class >= GFX7) {
 		si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
 			       S_030938_SIZE(sctx->screen->tess_factor_ring_size / 4));
 		si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
 			       factor_va >> 8);
-		if (sctx->chip_class >= GFX9)
+		if (sctx->chip_class >= GFX10)
+			si_pm4_set_reg(sctx->init_config, R_030984_VGT_TF_MEMORY_BASE_HI_UMD,
+				       S_030984_BASE_HI(factor_va >> 40));
+		else if (sctx->chip_class == GFX9)
 			si_pm4_set_reg(sctx->init_config, R_030944_VGT_TF_MEMORY_BASE_HI,
 				       S_030944_BASE_HI(factor_va >> 40));
 		si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
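The tessellation-factor ring programming above splits a 48-bit GPU virtual address across two registers: VGT_TF_MEMORY_BASE takes bits [39:8] (the ring is at least 256-byte aligned, so the low 8 bits are zero) and the *_HI register takes bits [47:40]. A worked example with a made-up address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t factor_va = 0x1234567890ull << 8; /* hypothetical, 256-B aligned */
	uint32_t base = (uint32_t)(factor_va >> 8);         /* bits [39:8]  */
	uint32_t hi   = (uint32_t)(factor_va >> 40) & 0xff; /* bits [47:40] */

	/* Prints BASE=0x34567890 HI=0x12 for this address. */
	printf("VGT_TF_MEMORY_BASE=0x%08x HI=0x%02x\n", base, hi);
	return 0;
}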
- */
-static void si_generate_fixed_func_tcs(struct si_context *sctx)
+static struct si_pm4_state *si_build_vgt_shader_config(struct si_screen *screen,
+						       union si_vgt_stages_key key)
 {
-	struct ureg_src outer, inner;
-	struct ureg_dst tessouter, tessinner;
-	struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);
+	struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
+	uint32_t stages = 0;
 
-	if (!ureg)
-		return; /* if we get here, we're screwed */
+	if (key.u.tess) {
+		stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
+			  S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);
 
-	assert(!sctx->fixed_func_tcs_shader.cso);
-
-	outer = ureg_DECL_system_value(ureg,
-				       TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI, 0);
-	inner = ureg_DECL_system_value(ureg,
-				       TGSI_SEMANTIC_DEFAULT_TESSINNER_SI, 0);
+		if (key.u.gs)
+			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
+				  S_028B54_GS_EN(1);
+		else if (key.u.ngg)
+			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
+		else
+			stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
+	} else if (key.u.gs) {
+		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
+			  S_028B54_GS_EN(1);
+	} else if (key.u.ngg) {
+		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
+	}
 
-	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
-	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);
+	if (key.u.ngg) {
+		stages |= S_028B54_PRIMGEN_EN(1);
+		if (key.u.streamout)
+			stages |= S_028B54_NGG_WAVE_ID_EN(1);
+	} else if (key.u.gs)
+		stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
 
-	ureg_MOV(ureg, tessouter, outer);
-	ureg_MOV(ureg, tessinner, inner);
-	ureg_END(ureg);
+	if (screen->info.chip_class >= GFX9)
+		stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
 
-	sctx->fixed_func_tcs_shader.cso =
-		ureg_create_shader_and_destroy(ureg, &sctx->b);
+	si_pm4_set_reg(pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
+	return pm4;
 }
 
-static void si_update_vgt_shader_config(struct si_context *sctx)
+static void si_update_vgt_shader_config(struct si_context *sctx,
+					union si_vgt_stages_key key)
 {
-	/* Calculate the index of the config.
-	 * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
-	unsigned index = 2*!!sctx->tes_shader.cso + !!sctx->gs_shader.cso;
-	struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];
-
-	if (!*pm4) {
-		uint32_t stages = 0;
+	struct si_pm4_state **pm4 = &sctx->vgt_shader_config[key.index];
 
-		*pm4 = CALLOC_STRUCT(si_pm4_state);
-
-		if (sctx->tes_shader.cso) {
-			stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
-				  S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);
-
-			if (sctx->gs_shader.cso)
-				stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
-					  S_028B54_GS_EN(1) |
-					  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
-			else
-				stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
-		} else if (sctx->gs_shader.cso) {
-			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
-				  S_028B54_GS_EN(1) |
-				  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
-		}
-
-		if (sctx->chip_class >= GFX9)
-			stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
-
-		si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
-	}
+	if (unlikely(!*pm4))
+		*pm4 = si_build_vgt_shader_config(sctx->screen, key);
 
 	si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
 }
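The union si_vgt_stages_key used above is a compact pattern: the stage enables live in bitfields, and an overlaid integer doubles as an array index, so every distinct stage combination maps to exactly one cached pm4 state. A sketch of the idea; the field names are taken from this hunk but the widths and layout are assumptions, and bitfield/integer aliasing through a union is relying on common compiler behavior:

#include <stdint.h>

union toy_vgt_stages_key {
	struct {
		uint8_t tess : 1;
		uint8_t gs : 1;
		uint8_t ngg : 1;
		uint8_t streamout : 1; /* only meaningful with ngg */
	} u;
	uint8_t index; /* aliases the bits: at most 16 configs */
};

static void *toy_config_cache[16];

static void *toy_get_config(union toy_vgt_stages_key key,
			    void *(*build)(union toy_vgt_stages_key))
{
	/* Build each stage combination once, then reuse it. */
	if (!toy_config_cache[key.index])
		toy_config_cache[key.index] = build(key);
	return toy_config_cache[key.index];
}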
@@ -3176,6 +3776,7 @@ bool si_update_shaders(struct si_context *sctx)
 	struct si_shader *old_vs = si_get_vs_state(sctx);
 	bool old_clip_disable = old_vs ? old_vs->key.opt.clip_disable : false;
 	struct si_shader *old_ps = sctx->ps_shader.current;
+	union si_vgt_stages_key key;
 	unsigned old_spi_shader_col_format = old_ps ?
 		old_ps->key.part.ps.epilog.spi_shader_col_format : 0;
 	int r;
@@ -3184,7 +3785,25 @@
 	compiler_state.debug = sctx->debug;
 	compiler_state.is_debug_context = sctx->is_debug;
 
-	/* Update stages before GS. */
+	key.index = 0;
+
+	if (sctx->tes_shader.cso)
+		key.u.tess = 1;
+	if (sctx->gs_shader.cso)
+		key.u.gs = 1;
+
+	if (sctx->chip_class >= GFX10) {
+		key.u.ngg = sctx->ngg;
+
+		if (sctx->gs_shader.cso)
+			key.u.streamout = !!sctx->gs_shader.cso->so.num_outputs;
+		else if (sctx->tes_shader.cso)
+			key.u.streamout = !!sctx->tes_shader.cso->so.num_outputs;
+		else
+			key.u.streamout = !!sctx->vs_shader.cso->so.num_outputs;
+	}
+
+	/* Update TCS and TES. */
 	if (sctx->tes_shader.cso) {
 		if (!sctx->tess_rings) {
 			si_init_tess_factor_ring(sctx);
@@ -3192,92 +3811,93 @@ bool si_update_shaders(struct si_context *sctx)
 			return false;
 		}
 
-		/* VS as LS */
-		if (sctx->chip_class <= VI) {
-			r = si_shader_select(ctx, &sctx->vs_shader,
-					     &compiler_state);
-			if (r)
-				return false;
-			si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
-		}
-
 		if (sctx->tcs_shader.cso) {
-			r = si_shader_select(ctx, &sctx->tcs_shader,
+			r = si_shader_select(ctx, &sctx->tcs_shader, key,
 					     &compiler_state);
 			if (r)
 				return false;
 			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
 		} else {
 			if (!sctx->fixed_func_tcs_shader.cso) {
-				si_generate_fixed_func_tcs(sctx);
+				sctx->fixed_func_tcs_shader.cso =
+					si_create_fixed_func_tcs(sctx);
 				if (!sctx->fixed_func_tcs_shader.cso)
 					return false;
 			}
 
 			r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader,
-					     &compiler_state);
+					     key, &compiler_state);
 			if (r)
 				return false;
 			si_pm4_bind_state(sctx, hs,
 					  sctx->fixed_func_tcs_shader.current->pm4);
 		}
 
-		if (sctx->gs_shader.cso) {
-			/* TES as ES */
-			if (sctx->chip_class <= VI) {
-				r = si_shader_select(ctx, &sctx->tes_shader,
-						     &compiler_state);
-				if (r)
-					return false;
-				si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
-			}
-		} else {
-			/* TES as VS */
-			r = si_shader_select(ctx, &sctx->tes_shader,
-					     &compiler_state);
+		if (!sctx->gs_shader.cso || sctx->chip_class <= GFX8) {
+			r = si_shader_select(ctx, &sctx->tes_shader, key, &compiler_state);
 			if (r)
 				return false;
-			si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
-		}
-	} else if (sctx->gs_shader.cso) {
-		if (sctx->chip_class <= VI) {
-			/* VS as ES */
-			r = si_shader_select(ctx, &sctx->vs_shader,
-					     &compiler_state);
-			if (r)
-				return false;
-			si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
 
-			si_pm4_bind_state(sctx, ls, NULL);
-			si_pm4_bind_state(sctx, hs, NULL);
+			if (sctx->gs_shader.cso) {
+				/* TES as ES */
+				assert(sctx->chip_class <= GFX8);
+				si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
+			} else if (key.u.ngg) {
+				si_pm4_bind_state(sctx, gs, sctx->tes_shader.current->pm4);
+			} else {
+				si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
+			}
 		}
 	} else {
-		/* VS as VS */
-		r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
-		if (r)
-			return false;
-		si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
-		si_pm4_bind_state(sctx, ls, NULL);
+		if (sctx->chip_class <= GFX8)
+			si_pm4_bind_state(sctx, ls, NULL);
 		si_pm4_bind_state(sctx, hs, NULL);
 	}
 
 	/* Update GS. */
 	if (sctx->gs_shader.cso) {
-		r = si_shader_select(ctx, &sctx->gs_shader, &compiler_state);
+		r = si_shader_select(ctx, &sctx->gs_shader, key, &compiler_state);
 		if (r)
 			return false;
 		si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
-		si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4);
+		if (!key.u.ngg) {
+			si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4);
 
-		if (!si_update_gs_ring_buffers(sctx))
-			return false;
+			if (!si_update_gs_ring_buffers(sctx))
+				return false;
+		} else {
+			si_pm4_bind_state(sctx, vs, NULL);
+		}
 	} else {
-		si_pm4_bind_state(sctx, gs, NULL);
-		if (sctx->chip_class <= VI)
-			si_pm4_bind_state(sctx, es, NULL);
+		if (!key.u.ngg) {
+			si_pm4_bind_state(sctx, gs, NULL);
+			if (sctx->chip_class <= GFX8)
+				si_pm4_bind_state(sctx, es, NULL);
+		}
 	}
 
-	si_update_vgt_shader_config(sctx);
+	/* Update VS. */
+	if ((!key.u.tess && !key.u.gs) || sctx->chip_class <= GFX8) {
+		r = si_shader_select(ctx, &sctx->vs_shader, key, &compiler_state);
+		if (r)
+			return false;
+
+		if (!key.u.tess && !key.u.gs) {
+			if (key.u.ngg) {
+				si_pm4_bind_state(sctx, gs, sctx->vs_shader.current->pm4);
+				si_pm4_bind_state(sctx, vs, NULL);
+			} else {
+				si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
+			}
+		} else if (sctx->tes_shader.cso) {
+			si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
+		} else {
+			assert(sctx->gs_shader.cso);
+			si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
+		}
+	}
+
+	si_update_vgt_shader_config(sctx, key);
 
 	if (old_clip_disable != si_get_vs_state(sctx)->key.opt.clip_disable)
 		si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs);
@@ -3285,7 +3905,7 @@
 	if (sctx->ps_shader.cso) {
 		unsigned db_shader_control;
 
-		r = si_shader_select(ctx, &sctx->ps_shader, &compiler_state);
+		r = si_shader_select(ctx, &sctx->ps_shader, key, &compiler_state);
 		if (r)
 			return false;
 		si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);
@@ -3294,7 +3914,9 @@
 			sctx->ps_shader.cso->db_shader_control |
 			S_02880C_KILL_ENABLE(si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS);
 
-		if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
+		if (si_pm4_state_changed(sctx, ps) ||
+		    si_pm4_state_changed(sctx, vs) ||
+		    (key.u.ngg && si_pm4_state_changed(sctx, gs)) ||
 		    sctx->sprite_coord_enable != rs->sprite_coord_enable ||
 		    sctx->flatshade != rs->flatshade) {
 			sctx->sprite_coord_enable = rs->sprite_coord_enable;
@@ -3320,7 +3942,7 @@
 			sctx->smoothing_enabled = sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing;
 			si_mark_atom_dirty(sctx, &sctx->atoms.s.msaa_config);
 
-			if (sctx->chip_class == SI)
+			if (sctx->chip_class == GFX6)
 				si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
 
 			if (sctx->framebuffer.nr_samples <= 1)
@@ -3338,7 +3960,7 @@
 			return false;
 	}
 
-	if (sctx->chip_class >= CIK) {
+	if (sctx->chip_class >= GFX7) {
 		if (si_pm4_state_enabled_and_changed(sctx, ls))
 			sctx->prefetch_L2_mask |= SI_PREFETCH_LS;
 		else if (!sctx->queued.named.ls)
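The GFX7+ block above turns "this pm4 state is bound and just changed" into a prefetch bitmask that the draw path later consumes to warm L2 before the next draw. The shape of the pattern, with hypothetical names and with the enabled-and-changed test inferred from the helper's name:

#include <stdbool.h>
#include <stdint.h>

#define TOY_PREFETCH_LS (1u << 0)

struct toy_stage_slot { void *queued, *emitted; };

/* Bound and different from what the hardware last saw. */
static bool toy_enabled_and_changed(const struct toy_stage_slot *s)
{
	return s->queued && s->queued != s->emitted;
}

static void toy_update_prefetch(uint32_t *mask,
				const struct toy_stage_slot *ls)
{
	if (toy_enabled_and_changed(ls))
		*mask |= TOY_PREFETCH_LS;   /* prefetch before next draw */
	else if (!ls->queued)
		*mask &= ~TOY_PREFETCH_LS;  /* stage disabled: drop the bit */
}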
@@ -3388,70 +4010,6 @@ static void si_emit_scratch_state(struct si_context *sctx)
 	}
 }
 
-void *si_get_blit_vs(struct si_context *sctx, enum blitter_attrib_type type,
-		     unsigned num_layers)
-{
-	unsigned vs_blit_property;
-	void **vs;
-
-	switch (type) {
-	case UTIL_BLITTER_ATTRIB_NONE:
-		vs = num_layers > 1 ? &sctx->vs_blit_pos_layered :
-				      &sctx->vs_blit_pos;
-		vs_blit_property = SI_VS_BLIT_SGPRS_POS;
-		break;
-	case UTIL_BLITTER_ATTRIB_COLOR:
-		vs = num_layers > 1 ? &sctx->vs_blit_color_layered :
-				      &sctx->vs_blit_color;
-		vs_blit_property = SI_VS_BLIT_SGPRS_POS_COLOR;
-		break;
-	case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
-	case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
-		assert(num_layers == 1);
-		vs = &sctx->vs_blit_texcoord;
-		vs_blit_property = SI_VS_BLIT_SGPRS_POS_TEXCOORD;
-		break;
-	default:
-		assert(0);
-		return NULL;
-	}
-
-	if (*vs)
-		return *vs;
-
-	struct ureg_program *ureg = ureg_create(PIPE_SHADER_VERTEX);
-	if (!ureg)
-		return NULL;
-
-	/* Tell the shader to load VS inputs from SGPRs: */
-	ureg_property(ureg, TGSI_PROPERTY_VS_BLIT_SGPRS, vs_blit_property);
-	ureg_property(ureg, TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION, true);
-
-	/* This is just a pass-through shader with 1-3 MOV instructions. */
-	ureg_MOV(ureg,
-		 ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0),
-		 ureg_DECL_vs_input(ureg, 0));
-
-	if (type != UTIL_BLITTER_ATTRIB_NONE) {
-		ureg_MOV(ureg,
-			 ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0),
-			 ureg_DECL_vs_input(ureg, 1));
-	}
-
-	if (num_layers > 1) {
-		struct ureg_src instance_id =
-			ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);
-		struct ureg_dst layer =
-			ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);
-
-		ureg_MOV(ureg, ureg_writemask(layer, TGSI_WRITEMASK_X),
-			 ureg_scalar(instance_id, TGSI_SWIZZLE_X));
-	}
-	ureg_END(ureg);
-
-	*vs = ureg_create_shader_and_destroy(ureg, &sctx->b);
-	return *vs;
-}
-
 void si_init_shader_functions(struct si_context *sctx)
 {
 	sctx->atoms.s.spi_map.emit = si_emit_spi_map;