X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fradeonsi%2Fsi_state_shaders.c;h=18015bbec485f74bfffdd202d240d112ce64c808;hb=312e04689a9d8d4f9c319e69c61220e10653cfcd;hp=628844df7e38a4606229b9d9a5575f02a81d1da0;hpb=c9b7a37b8f7979433655e269a2b161d33eb41659;p=mesa.git diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c index 628844df7e3..18015bbec48 100644 --- a/src/gallium/drivers/radeonsi/si_state_shaders.c +++ b/src/gallium/drivers/radeonsi/si_state_shaders.c @@ -23,10 +23,10 @@ */ #include "si_build_pm4.h" -#include "gfx9d.h" +#include "sid.h" #include "compiler/nir/nir_serialize.h" -#include "tgsi/tgsi_parse.h" +#include "nir/tgsi_to_nir.h" #include "util/hash_table.h" #include "util/crc32.h" #include "util/u_async_debug.h" @@ -41,41 +41,53 @@ /* SHADER_CACHE */ /** - * Return the IR binary in a buffer. For TGSI the first 4 bytes contain its - * size as integer. + * Return the IR key for the shader cache. */ -void *si_get_ir_binary(struct si_shader_selector *sel) +void si_get_ir_cache_key(struct si_shader_selector *sel, bool ngg, bool es, + unsigned char ir_sha1_cache_key[20]) { - struct blob blob; + struct blob blob = {}; unsigned ir_size; void *ir_binary; - if (sel->tokens) { - ir_binary = sel->tokens; - ir_size = tgsi_num_tokens(sel->tokens) * - sizeof(struct tgsi_token); + if (sel->nir_binary) { + ir_binary = sel->nir_binary; + ir_size = sel->nir_size; } else { assert(sel->nir); blob_init(&blob); - nir_serialize(&blob, sel->nir); + nir_serialize(&blob, sel->nir, true); ir_binary = blob.data; ir_size = blob.size; } - unsigned size = 4 + ir_size + sizeof(sel->so); - char *result = (char*)MALLOC(size); - if (!result) - return NULL; - - *((uint32_t*)result) = size; - memcpy(result + 4, ir_binary, ir_size); - memcpy(result + 4 + ir_size, &sel->so, sizeof(sel->so)); + /* These settings affect the compilation, but they are not derived + * from the input shader IR. + */ + unsigned shader_variant_flags = 0; + if (ngg) + shader_variant_flags |= 1 << 0; if (sel->nir) + shader_variant_flags |= 1 << 1; + if (si_get_wave_size(sel->screen, sel->type, ngg, es) == 32) + shader_variant_flags |= 1 << 2; + if (sel->force_correct_derivs_after_kill) + shader_variant_flags |= 1 << 3; + + struct mesa_sha1 ctx; + _mesa_sha1_init(&ctx); + _mesa_sha1_update(&ctx, &shader_variant_flags, 4); + _mesa_sha1_update(&ctx, ir_binary, ir_size); + if (sel->type == PIPE_SHADER_VERTEX || + sel->type == PIPE_SHADER_TESS_EVAL || + sel->type == PIPE_SHADER_GEOMETRY) + _mesa_sha1_update(&ctx, &sel->so, sizeof(sel->so)); + _mesa_sha1_final(&ctx, ir_sha1_cache_key); + + if (ir_binary == blob.data) blob_finish(&blob); - - return result; } /** Copy "data" to "ptr" and return the next dword following copied data. */ @@ -127,21 +139,21 @@ static uint32_t *read_chunk(uint32_t *ptr, void **data, unsigned *size) static void *si_get_shader_binary(struct si_shader *shader) { /* There is always a size of data followed by the data itself. */ - unsigned relocs_size = shader->binary.reloc_count * - sizeof(shader->binary.relocs[0]); - unsigned disasm_size = shader->binary.disasm_string ? - strlen(shader->binary.disasm_string) + 1 : 0; unsigned llvm_ir_size = shader->binary.llvm_ir_string ? strlen(shader->binary.llvm_ir_string) + 1 : 0; + + /* Refuse to allocate overly large buffers and guard against integer + * overflow. 
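	 *
	 * Editor's sketch: the sum below has a fixed number of terms, each
	 * either a small constant or bounded by UINT_MAX / 4 plus alignment
	 * slack, so the unsigned addition cannot wrap:
	 *
	 *   size = 4 + 4                          // header: total size, CRC32
	 *        + align(sizeof(config), 4)       // small constant
	 *        + align(sizeof(info), 4)         // small constant
	 *        + 4 + align(elf_size, 4)         // <= UINT_MAX / 4 + 3
	 *        + 4 + align(llvm_ir_size, 4);    // <= UINT_MAX / 4 + 3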
*/ + if (shader->binary.elf_size > UINT_MAX / 4 || + llvm_ir_size > UINT_MAX / 4) + return NULL; + unsigned size = 4 + /* total size */ 4 + /* CRC32 of the data below */ align(sizeof(shader->config), 4) + align(sizeof(shader->info), 4) + - 4 + align(shader->binary.code_size, 4) + - 4 + align(shader->binary.rodata_size, 4) + - 4 + align(relocs_size, 4) + - 4 + align(disasm_size, 4) + + 4 + align(shader->binary.elf_size, 4) + 4 + align(llvm_ir_size, 4); void *buffer = CALLOC(1, size); uint32_t *ptr = (uint32_t*)buffer; @@ -154,10 +166,7 @@ static void *si_get_shader_binary(struct si_shader *shader) ptr = write_data(ptr, &shader->config, sizeof(shader->config)); ptr = write_data(ptr, &shader->info, sizeof(shader->info)); - ptr = write_chunk(ptr, shader->binary.code, shader->binary.code_size); - ptr = write_chunk(ptr, shader->binary.rodata, shader->binary.rodata_size); - ptr = write_chunk(ptr, shader->binary.relocs, relocs_size); - ptr = write_chunk(ptr, shader->binary.disasm_string, disasm_size); + ptr = write_chunk(ptr, shader->binary.elf_buffer, shader->binary.elf_size); ptr = write_chunk(ptr, shader->binary.llvm_ir_string, llvm_ir_size); assert((char *)ptr - (char *)buffer == size); @@ -175,6 +184,7 @@ static bool si_load_shader_binary(struct si_shader *shader, void *binary) uint32_t size = *ptr++; uint32_t crc32 = *ptr++; unsigned chunk_size; + unsigned elf_size; if (util_hash_crc32(ptr, size - 8) != crc32) { fprintf(stderr, "radeonsi: binary shader has invalid CRC32\n"); @@ -183,13 +193,9 @@ static bool si_load_shader_binary(struct si_shader *shader, void *binary) ptr = read_data(ptr, &shader->config, sizeof(shader->config)); ptr = read_data(ptr, &shader->info, sizeof(shader->info)); - ptr = read_chunk(ptr, (void**)&shader->binary.code, - &shader->binary.code_size); - ptr = read_chunk(ptr, (void**)&shader->binary.rodata, - &shader->binary.rodata_size); - ptr = read_chunk(ptr, (void**)&shader->binary.relocs, &chunk_size); - shader->binary.reloc_count = chunk_size / sizeof(shader->binary.relocs[0]); - ptr = read_chunk(ptr, (void**)&shader->binary.disasm_string, &chunk_size); + ptr = read_chunk(ptr, (void**)&shader->binary.elf_buffer, + &elf_size); + shader->binary.elf_size = elf_size; ptr = read_chunk(ptr, (void**)&shader->binary.llvm_ir_string, &chunk_size); return true; @@ -198,10 +204,9 @@ static bool si_load_shader_binary(struct si_shader *shader, void *binary) /** * Insert a shader into the cache. It's assumed the shader is not in the cache. * Use si_shader_cache_load_shader before calling this. - * - * Returns false on failure, in which case the ir_binary should be freed. 
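 *
 * Editor's sketch of the blob layout produced by si_get_shader_binary()
 * above (chunks are 4-byte aligned, each prefixed by its u32 size):
 *
 *   [u32 total_size][u32 crc32 of everything after this field]
 *   [config][info]
 *   [u32 elf_size][elf bytes ...][u32 llvm_ir_size][llvm-ir string ...]
 *
 * si_load_shader_binary() verifies the CRC32 before trusting any field.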
*/ -bool si_shader_cache_insert_shader(struct si_screen *sscreen, void *ir_binary, +void si_shader_cache_insert_shader(struct si_screen *sscreen, + unsigned char ir_sha1_cache_key[20], struct si_shader *shader, bool insert_into_disk_cache) { @@ -209,42 +214,41 @@ bool si_shader_cache_insert_shader(struct si_screen *sscreen, void *ir_binary, struct hash_entry *entry; uint8_t key[CACHE_KEY_SIZE]; - entry = _mesa_hash_table_search(sscreen->shader_cache, ir_binary); + entry = _mesa_hash_table_search(sscreen->shader_cache, ir_sha1_cache_key); if (entry) - return false; /* already added */ + return; /* already added */ hw_binary = si_get_shader_binary(shader); if (!hw_binary) - return false; + return; - if (_mesa_hash_table_insert(sscreen->shader_cache, ir_binary, + if (_mesa_hash_table_insert(sscreen->shader_cache, + mem_dup(ir_sha1_cache_key, 20), hw_binary) == NULL) { FREE(hw_binary); - return false; + return; } if (sscreen->disk_shader_cache && insert_into_disk_cache) { - disk_cache_compute_key(sscreen->disk_shader_cache, ir_binary, - *((uint32_t *)ir_binary), key); + disk_cache_compute_key(sscreen->disk_shader_cache, + ir_sha1_cache_key, 20, key); disk_cache_put(sscreen->disk_shader_cache, key, hw_binary, *((uint32_t *) hw_binary), NULL); } - - return true; } -bool si_shader_cache_load_shader(struct si_screen *sscreen, void *ir_binary, +bool si_shader_cache_load_shader(struct si_screen *sscreen, + unsigned char ir_sha1_cache_key[20], struct si_shader *shader) { struct hash_entry *entry = - _mesa_hash_table_search(sscreen->shader_cache, ir_binary); + _mesa_hash_table_search(sscreen->shader_cache, ir_sha1_cache_key); if (!entry) { if (sscreen->disk_shader_cache) { unsigned char sha1[CACHE_KEY_SIZE]; - size_t tg_size = *((uint32_t *) ir_binary); disk_cache_compute_key(sscreen->disk_shader_cache, - ir_binary, tg_size, sha1); + ir_sha1_cache_key, 20, sha1); size_t binary_size; uint8_t *buffer = @@ -275,16 +279,13 @@ bool si_shader_cache_load_shader(struct si_screen *sscreen, void *ir_binary, } free(buffer); - if (!si_shader_cache_insert_shader(sscreen, ir_binary, - shader, false)) - FREE(ir_binary); + si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key, + shader, false); } else { return false; } } else { - if (si_load_shader_binary(shader, entry->data)) - FREE(ir_binary); - else + if (!si_load_shader_binary(shader, entry->data)) return false; } p_atomic_inc(&sscreen->num_shader_cache_hits); @@ -293,20 +294,14 @@ bool si_shader_cache_load_shader(struct si_screen *sscreen, void *ir_binary, static uint32_t si_shader_cache_key_hash(const void *key) { - /* The first dword is the key size. */ - return util_hash_crc32(key, *(uint32_t*)key); + /* Take the first dword of SHA1. */ + return *(uint32_t*)key; } static bool si_shader_cache_key_equals(const void *a, const void *b) { - uint32_t *keya = (uint32_t*)a; - uint32_t *keyb = (uint32_t*)b; - - /* The first dword is the key size. */ - if (*keya != *keyb) - return false; - - return memcmp(keya, keyb, *keya) == 0; + /* Compare SHA1s. 
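	 *
	 * Editor's note: together with si_shader_cache_key_hash() above, the
	 * cache now keys on raw 20-byte SHA1s; the hash just reuses the first
	 * dword (already uniformly distributed), e.g.:
	 *
	 *   uint32_t hash = *(uint32_t *)key;     // si_shader_cache_key_hash
	 *   bool equal = memcmp(a, b, 20) == 0;   // this function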
*/ + return memcmp(a, b, 20) == 0; } static void si_destroy_shader_cache_entry(struct hash_entry *entry) @@ -317,7 +312,7 @@ static void si_destroy_shader_cache_entry(struct hash_entry *entry) bool si_init_shader_cache(struct si_screen *sscreen) { - (void) mtx_init(&sscreen->shader_cache_mutex, mtx_plain); + (void) simple_mtx_init(&sscreen->shader_cache_mutex, mtx_plain); sscreen->shader_cache = _mesa_hash_table_create(NULL, si_shader_cache_key_hash, @@ -331,7 +326,7 @@ void si_destroy_shader_cache(struct si_screen *sscreen) if (sscreen->shader_cache) _mesa_hash_table_destroy(sscreen->shader_cache, si_destroy_shader_cache_entry); - mtx_destroy(&sscreen->shader_cache_mutex); + simple_mtx_destroy(&sscreen->shader_cache_mutex); } /* SHADER STATES */ @@ -387,7 +382,7 @@ static void si_set_tesseval_regs(struct si_screen *sscreen, else topology = V_028B6C_OUTPUT_TRIANGLE_CW; - if (sscreen->has_distributed_tess) { + if (sscreen->info.has_distributed_tess) { if (sscreen->info.family == CHIP_FIJI || sscreen->info.family >= CHIP_POLARIS10) distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS; @@ -424,7 +419,8 @@ static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen, { unsigned type = sel->type; - if (sscreen->info.family < CHIP_POLARIS10) + if (sscreen->info.family < CHIP_POLARIS10 || + sscreen->info.chip_class >= GFX10) return; /* VS as VS, or VS as ES: */ @@ -467,10 +463,34 @@ static unsigned si_get_num_vs_user_sgprs(unsigned num_always_on_user_sgprs) return num_always_on_user_sgprs + 1; } +/* Return VGPR_COMP_CNT for the API vertex shader. This can be hw LS, LSHS, ES, ESGS, VS. */ +static unsigned si_get_vs_vgpr_comp_cnt(struct si_screen *sscreen, + struct si_shader *shader, bool legacy_vs_prim_id) +{ + assert(shader->selector->type == PIPE_SHADER_VERTEX || + (shader->previous_stage_sel && + shader->previous_stage_sel->type == PIPE_SHADER_VERTEX)); + + /* GFX6-9 LS (VertexID, RelAutoindex, InstanceID / StepRate0(==1), ...). + * GFX6-9 ES,VS (VertexID, InstanceID / StepRate0(==1), VSPrimID, ...) + * GFX10 LS (VertexID, RelAutoindex, UserVGPR1, InstanceID). + * GFX10 ES,VS (VertexID, UserVGPR0, UserVGPR1 or VSPrimID, UserVGPR2 or InstanceID) + */ + bool is_ls = shader->selector->type == PIPE_SHADER_TESS_CTRL || shader->key.as_ls; + + if (sscreen->info.chip_class >= GFX10 && shader->info.uses_instanceid) + return 3; + else if ((is_ls && shader->info.uses_instanceid) || legacy_vs_prim_id) + return 2; + else if (is_ls || shader->info.uses_instanceid) + return 1; + else + return 0; +} + static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader) { struct si_pm4_state *pm4; - unsigned vgpr_comp_cnt; uint64_t va; assert(sscreen->info.chip_class <= GFX8); @@ -482,18 +502,12 @@ static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader) va = shader->bo->gpu_address; si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY); - /* We need at least 2 components for LS. - * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID). - * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded. - */ - vgpr_comp_cnt = shader->info.uses_instanceid ? 
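	/* Editor's note: this removed ternary is subsumed by
	 * si_get_vs_vgpr_comp_cnt() above, whose decision table is roughly:
	 *
	 *   GFX10 && uses_instanceid                     -> 3
	 *   (LS && uses_instanceid) || legacy VS PrimID  -> 2
	 *   LS || uses_instanceid                        -> 1
	 *   otherwise                                    -> 0
	 */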
2 : 1; - si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8); si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40)); shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) | S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) | - S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) | + S_00B528_VGPR_COMP_CNT(si_get_vs_vgpr_comp_cnt(sscreen, shader, false)) | S_00B528_DX10_CLAMP(1) | S_00B528_FLOAT_MODE(shader->config.float_mode); shader->config.rsrc2 = S_00B52C_USER_SGPR(si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR)) | @@ -504,7 +518,6 @@ static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader) { struct si_pm4_state *pm4; uint64_t va; - unsigned ls_vgpr_comp_cnt = 0; pm4 = si_get_shader_pm4_state(shader); if (!pm4) @@ -514,22 +527,25 @@ static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader) si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY); if (sscreen->info.chip_class >= GFX9) { - si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8); - si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, S_00B414_MEM_BASE(va >> 40)); - - /* We need at least 2 components for LS. - * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID). - * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded. - */ - ls_vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1; + if (sscreen->info.chip_class >= GFX10) { + si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8); + si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40)); + } else { + si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8); + si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, S_00B414_MEM_BASE(va >> 40)); + } unsigned num_user_sgprs = si_get_num_vs_user_sgprs(GFX9_TCS_NUM_USER_SGPR); shader->config.rsrc2 = S_00B42C_USER_SGPR(num_user_sgprs) | - S_00B42C_USER_SGPR_MSB(num_user_sgprs >> 5) | S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0); + + if (sscreen->info.chip_class >= GFX10) + shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5); + else + shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5); } else { si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8); si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, S_00B424_MEM_BASE(va >> 40)); @@ -541,11 +557,16 @@ static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader) } si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS, - S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) | - S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) | + S_00B428_VGPRS((shader->config.num_vgprs - 1) / + (sscreen->ge_wave_size == 32 ? 8 : 4)) | + (sscreen->info.chip_class <= GFX9 ? + S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) : 0) | S_00B428_DX10_CLAMP(1) | + S_00B428_MEM_ORDERED(sscreen->info.chip_class >= GFX10) | + S_00B428_WGP_MODE(sscreen->info.chip_class >= GFX10) | S_00B428_FLOAT_MODE(shader->config.float_mode) | - S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt)); + S_00B428_LS_VGPR_COMP_CNT(sscreen->info.chip_class >= GFX9 ? + si_get_vs_vgpr_comp_cnt(sscreen, shader, false) : 0)); if (sscreen->info.chip_class <= GFX8) { si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, @@ -598,8 +619,7 @@ static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader) si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY); if (shader->selector->type == PIPE_SHADER_VERTEX) { - /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) 
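		 *
		 * Editor's note: superseded by si_get_vs_vgpr_comp_cnt(). For
		 * reference, that helper documents the GFX6-9 ES/VS input VGPR
		 * layout as (VertexID, InstanceID / StepRate0(==1), VSPrimID, ...).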
*/ - vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0; + vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false); num_user_sgprs = si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR); } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) { vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2; @@ -628,41 +648,9 @@ static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader) polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4); } -static unsigned si_conv_prim_to_gs_out(unsigned mode) -{ - static const int prim_conv[] = { - [PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST, - [PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP, - [PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP, - [PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP, - [PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP, - [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP, - [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP, - [PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST, - }; - assert(mode < ARRAY_SIZE(prim_conv)); - - return prim_conv[mode]; -} - -struct gfx9_gs_info { - unsigned es_verts_per_subgroup; - unsigned gs_prims_per_subgroup; - unsigned gs_inst_prims_in_subgroup; - unsigned max_prims_per_subgroup; - unsigned lds_size; -}; - -static void gfx9_get_gs_info(struct si_shader_selector *es, - struct si_shader_selector *gs, - struct gfx9_gs_info *out) +void gfx9_get_gs_info(struct si_shader_selector *es, + struct si_shader_selector *gs, + struct gfx9_gs_info *out) { unsigned gs_num_invocations = MAX2(gs->gs_num_invocations, 1); unsigned input_prim = gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]; @@ -753,7 +741,7 @@ static void gfx9_get_gs_info(struct si_shader_selector *es, out->gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations; out->max_prims_per_subgroup = out->gs_inst_prims_in_subgroup * gs->gs_max_out_vertices; - out->lds_size = align(esgs_lds_size, 128) / 128; + out->esgs_ring_size = 4 * esgs_lds_size; assert(out->max_prims_per_subgroup <= max_out_prims); } @@ -767,14 +755,12 @@ static void si_emit_shader_gs(struct si_context *sctx) return; /* R_028A60_VGT_GSVS_RING_OFFSET_1, R_028A64_VGT_GSVS_RING_OFFSET_2 - * R_028A68_VGT_GSVS_RING_OFFSET_3, R_028A6C_VGT_GS_OUT_PRIM_TYPE */ - radeon_opt_set_context_reg4(sctx, R_028A60_VGT_GSVS_RING_OFFSET_1, + * R_028A68_VGT_GSVS_RING_OFFSET_3 */ + radeon_opt_set_context_reg3(sctx, R_028A60_VGT_GSVS_RING_OFFSET_1, SI_TRACKED_VGT_GSVS_RING_OFFSET_1, shader->ctx_reg.gs.vgt_gsvs_ring_offset_1, shader->ctx_reg.gs.vgt_gsvs_ring_offset_2, - shader->ctx_reg.gs.vgt_gsvs_ring_offset_3, - shader->ctx_reg.gs.vgt_gs_out_prim_type); - + shader->ctx_reg.gs.vgt_gsvs_ring_offset_3); /* R_028AB0_VGT_GSVS_RING_ITEMSIZE */ radeon_opt_set_context_reg(sctx, R_028AB0_VGT_GSVS_RING_ITEMSIZE, @@ -855,9 +841,6 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader) offset += num_components[2] * sel->gs_max_out_vertices; shader->ctx_reg.gs.vgt_gsvs_ring_offset_3 = offset; - shader->ctx_reg.gs.vgt_gs_out_prim_type = - 
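	/* Editor's note: this statement was dropped because the GS output
	 * primitive type is now derived at draw time (cf. the new
	 * sel->rast_prim bookkeeping added in si_create_shader_selector()).
	 * Also note that gfx9_get_gs_info() now reports esgs_ring_size in
	 * bytes rather than a pre-encoded LDS_SIZE value; a sketch of the
	 * old encoding in terms of the new:
	 *
	 *   unsigned dwords   = out->esgs_ring_size / 4;
	 *   unsigned lds_size = align(dwords, 128) / 128;
	 */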
si_conv_prim_to_gs_out(sel->gs_output_prim); - if (max_stream >= 3) offset += num_components[3] * sel->gs_max_out_vertices; shader->ctx_reg.gs.vgt_gsvs_ring_itemsize = offset; @@ -882,12 +865,10 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader) unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]; unsigned es_type = shader->key.part.gs.es->type; unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt; - struct gfx9_gs_info gs_info; - if (es_type == PIPE_SHADER_VERTEX) - /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */ - es_vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0; - else if (es_type == PIPE_SHADER_TESS_EVAL) + if (es_type == PIPE_SHADER_VERTEX) { + es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false); + } else if (es_type == PIPE_SHADER_TESS_EVAL) es_vgpr_comp_cnt = shader->key.part.gs.es->info.uses_primid ? 3 : 2; else unreachable("invalid shader selector type"); @@ -910,31 +891,44 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader) else num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR; - gfx9_get_gs_info(shader->key.part.gs.es, sel, &gs_info); + if (sscreen->info.chip_class >= GFX10) { + si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8); + si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(va >> 40)); + } else { + si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8); + si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, S_00B214_MEM_BASE(va >> 40)); + } - si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8); - si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, S_00B214_MEM_BASE(va >> 40)); + uint32_t rsrc1 = + S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) | + S_00B228_DX10_CLAMP(1) | + S_00B228_MEM_ORDERED(sscreen->info.chip_class >= GFX10) | + S_00B228_WGP_MODE(sscreen->info.chip_class >= GFX10) | + S_00B228_FLOAT_MODE(shader->config.float_mode) | + S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt); + uint32_t rsrc2 = + S_00B22C_USER_SGPR(num_user_sgprs) | + S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) | + S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) | + S_00B22C_LDS_SIZE(shader->config.lds_size) | + S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0); + + if (sscreen->info.chip_class >= GFX10) { + rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5); + } else { + rsrc1 |= S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8); + rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5); + } - si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS, - S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) | - S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) | - S_00B228_DX10_CLAMP(1) | - S_00B228_FLOAT_MODE(shader->config.float_mode) | - S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt)); - si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS, - S_00B22C_USER_SGPR(num_user_sgprs) | - S_00B22C_USER_SGPR_MSB(num_user_sgprs >> 5) | - S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) | - S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) | - S_00B22C_LDS_SIZE(gs_info.lds_size) | - S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0)); + si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS, rsrc1); + si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS, rsrc2); shader->ctx_reg.gs.vgt_gs_onchip_cntl = - S_028A44_ES_VERTS_PER_SUBGRP(gs_info.es_verts_per_subgroup) | - S_028A44_GS_PRIMS_PER_SUBGRP(gs_info.gs_prims_per_subgroup) | - S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_info.gs_inst_prims_in_subgroup); + 
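	/* Editor's note on the RSRC encoding split above, in brief:
	 *
	 *   GFX9:   rsrc1 |= S_00B228_SGPRS((num_sgprs - 1) / 8);
	 *           rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(n >> 5);
	 *   GFX10:  the SGPRS field is gone (SGPR allocation is automatic);
	 *           rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(n >> 5);
	 */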
S_028A44_ES_VERTS_PER_SUBGRP(shader->gs_info.es_verts_per_subgroup) | + S_028A44_GS_PRIMS_PER_SUBGRP(shader->gs_info.gs_prims_per_subgroup) | + S_028A44_GS_INST_PRIMS_IN_SUBGRP(shader->gs_info.gs_inst_prims_in_subgroup); shader->ctx_reg.gs.vgt_gs_max_prims_per_subgroup = - S_028A94_MAX_PRIMS_PER_SUBGROUP(gs_info.max_prims_per_subgroup); + S_028A94_MAX_PRIMS_PER_SUBGROUP(shader->gs_info.max_prims_per_subgroup); shader->ctx_reg.gs.vgt_esgs_ring_itemsize = shader->key.part.gs.es->esgs_itemsize / 4; @@ -958,6 +952,323 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader) } } +/* Common tail code for NGG primitive shaders. */ +static void gfx10_emit_shader_ngg_tail(struct si_context *sctx, + struct si_shader *shader, + unsigned initial_cdw) +{ + radeon_opt_set_context_reg(sctx, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP, + SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP, + shader->ctx_reg.ngg.ge_max_output_per_subgroup); + radeon_opt_set_context_reg(sctx, R_028B4C_GE_NGG_SUBGRP_CNTL, + SI_TRACKED_GE_NGG_SUBGRP_CNTL, + shader->ctx_reg.ngg.ge_ngg_subgrp_cntl); + radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN, + SI_TRACKED_VGT_PRIMITIVEID_EN, + shader->ctx_reg.ngg.vgt_primitiveid_en); + radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL, + SI_TRACKED_VGT_GS_ONCHIP_CNTL, + shader->ctx_reg.ngg.vgt_gs_onchip_cntl); + radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT, + SI_TRACKED_VGT_GS_INSTANCE_CNT, + shader->ctx_reg.ngg.vgt_gs_instance_cnt); + radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE, + SI_TRACKED_VGT_ESGS_RING_ITEMSIZE, + shader->ctx_reg.ngg.vgt_esgs_ring_itemsize); + radeon_opt_set_context_reg(sctx, R_0286C4_SPI_VS_OUT_CONFIG, + SI_TRACKED_SPI_VS_OUT_CONFIG, + shader->ctx_reg.ngg.spi_vs_out_config); + radeon_opt_set_context_reg2(sctx, R_028708_SPI_SHADER_IDX_FORMAT, + SI_TRACKED_SPI_SHADER_IDX_FORMAT, + shader->ctx_reg.ngg.spi_shader_idx_format, + shader->ctx_reg.ngg.spi_shader_pos_format); + radeon_opt_set_context_reg(sctx, R_028818_PA_CL_VTE_CNTL, + SI_TRACKED_PA_CL_VTE_CNTL, + shader->ctx_reg.ngg.pa_cl_vte_cntl); + radeon_opt_set_context_reg(sctx, R_028838_PA_CL_NGG_CNTL, + SI_TRACKED_PA_CL_NGG_CNTL, + shader->ctx_reg.ngg.pa_cl_ngg_cntl); + + radeon_opt_set_context_reg_rmw(sctx, R_02881C_PA_CL_VS_OUT_CNTL, + SI_TRACKED_PA_CL_VS_OUT_CNTL__VS, + shader->pa_cl_vs_out_cntl, + SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK); + + if (initial_cdw != sctx->gfx_cs->current.cdw) + sctx->context_roll = true; +} + +static void gfx10_emit_shader_ngg_notess_nogs(struct si_context *sctx) +{ + struct si_shader *shader = sctx->queued.named.gs->shader; + unsigned initial_cdw = sctx->gfx_cs->current.cdw; + + if (!shader) + return; + + gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw); +} + +static void gfx10_emit_shader_ngg_tess_nogs(struct si_context *sctx) +{ + struct si_shader *shader = sctx->queued.named.gs->shader; + unsigned initial_cdw = sctx->gfx_cs->current.cdw; + + if (!shader) + return; + + radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, + SI_TRACKED_VGT_TF_PARAM, + shader->vgt_tf_param); + + gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw); +} + +static void gfx10_emit_shader_ngg_notess_gs(struct si_context *sctx) +{ + struct si_shader *shader = sctx->queued.named.gs->shader; + unsigned initial_cdw = sctx->gfx_cs->current.cdw; + + if (!shader) + return; + + radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT, + SI_TRACKED_VGT_GS_MAX_VERT_OUT, + shader->ctx_reg.ngg.vgt_gs_max_vert_out); + + 
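	/* Editor's note: every gfx10_emit_shader_ngg_* variant follows the
	 * same pattern (sketch):
	 *
	 *   unsigned initial_cdw = sctx->gfx_cs->current.cdw;
	 *   ... radeon_opt_set_context_reg*() calls, which skip the packet
	 *       when the tracked register value is unchanged ...
	 *   if (initial_cdw != sctx->gfx_cs->current.cdw)
	 *           sctx->context_roll = true;   // something was emitted
	 */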
gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw); +} + +static void gfx10_emit_shader_ngg_tess_gs(struct si_context *sctx) +{ + struct si_shader *shader = sctx->queued.named.gs->shader; + unsigned initial_cdw = sctx->gfx_cs->current.cdw; + + if (!shader) + return; + + radeon_opt_set_context_reg(sctx, R_028B38_VGT_GS_MAX_VERT_OUT, + SI_TRACKED_VGT_GS_MAX_VERT_OUT, + shader->ctx_reg.ngg.vgt_gs_max_vert_out); + radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, + SI_TRACKED_VGT_TF_PARAM, + shader->vgt_tf_param); + + gfx10_emit_shader_ngg_tail(sctx, shader, initial_cdw); +} + +unsigned si_get_input_prim(const struct si_shader_selector *gs) +{ + if (gs->type == PIPE_SHADER_GEOMETRY) + return gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]; + + if (gs->type == PIPE_SHADER_TESS_EVAL) { + if (gs->info.properties[TGSI_PROPERTY_TES_POINT_MODE]) + return PIPE_PRIM_POINTS; + if (gs->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES) + return PIPE_PRIM_LINES; + return PIPE_PRIM_TRIANGLES; + } + + /* TODO: Set this correctly if the primitive type is set in the shader key. */ + return PIPE_PRIM_TRIANGLES; /* worst case for all callers */ +} + +static unsigned si_get_vs_out_cntl(const struct si_shader_selector *sel, bool ngg) +{ + bool misc_vec_ena = + sel->info.writes_psize || (sel->info.writes_edgeflag && !ngg) || + sel->info.writes_layer || sel->info.writes_viewport_index; + return S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) | + S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag && !ngg) | + S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) | + S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) | + S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) | + S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena); +} + +/** + * Prepare the PM4 image for \p shader, which will run as a merged ESGS shader + * in NGG mode. + */ +static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader) +{ + const struct si_shader_selector *gs_sel = shader->selector; + const struct tgsi_shader_info *gs_info = &gs_sel->info; + enum pipe_shader_type gs_type = shader->selector->type; + const struct si_shader_selector *es_sel = + shader->previous_stage_sel ? shader->previous_stage_sel : shader->selector; + const struct tgsi_shader_info *es_info = &es_sel->info; + enum pipe_shader_type es_type = es_sel->type; + unsigned num_user_sgprs; + unsigned nparams, es_vgpr_comp_cnt, gs_vgpr_comp_cnt; + uint64_t va; + unsigned window_space = + gs_info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION]; + bool es_enable_prim_id = shader->key.mono.u.vs_export_prim_id || es_info->uses_primid; + unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1); + unsigned input_prim = si_get_input_prim(gs_sel); + bool break_wave_at_eoi = false; + struct si_pm4_state *pm4 = si_get_shader_pm4_state(shader); + if (!pm4) + return; + + if (es_type == PIPE_SHADER_TESS_EVAL) { + pm4->atom.emit = gs_type == PIPE_SHADER_GEOMETRY ? gfx10_emit_shader_ngg_tess_gs + : gfx10_emit_shader_ngg_tess_nogs; + } else { + pm4->atom.emit = gs_type == PIPE_SHADER_GEOMETRY ? 
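		/* Editor's note: the emit callback comes from a 2x2 matrix:
		 *
		 *              no GS                  with GS
		 *   no tess    ..._notess_nogs        ..._notess_gs
		 *   tess       ..._tess_nogs          ..._tess_gs
		 *
		 * The tess variants also emit VGT_TF_PARAM, the GS variants
		 * VGT_GS_MAX_VERT_OUT; all end in gfx10_emit_shader_ngg_tail().
		 */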
gfx10_emit_shader_ngg_notess_gs + : gfx10_emit_shader_ngg_notess_nogs; + } + + va = shader->bo->gpu_address; + si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY); + + if (es_type == PIPE_SHADER_VERTEX) { + es_vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, false); + + if (es_info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) { + num_user_sgprs = SI_SGPR_VS_BLIT_DATA + + es_info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]; + } else { + num_user_sgprs = si_get_num_vs_user_sgprs(GFX9_VSGS_NUM_USER_SGPR); + } + } else { + assert(es_type == PIPE_SHADER_TESS_EVAL); + es_vgpr_comp_cnt = es_enable_prim_id ? 3 : 2; + num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR; + + if (es_enable_prim_id || gs_info->uses_primid) + break_wave_at_eoi = true; + } + + /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and + * VGPR[0:4] are always loaded. + * + * Vertex shaders always need to load VGPR3, because they need to + * pass edge flags for decomposed primitives (such as quads) to the PA + * for the GL_LINE polygon mode to skip rendering lines on inner edges. + */ + if (gs_info->uses_invocationid || gs_type == PIPE_SHADER_VERTEX) + gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID, edge flags. */ + else if (gs_info->uses_primid) + gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */ + else if (input_prim >= PIPE_PRIM_TRIANGLES) + gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */ + else + gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */ + + si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8); + si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40); + si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS, + S_00B228_VGPRS((shader->config.num_vgprs - 1) / + (sscreen->ge_wave_size == 32 ? 8 : 4)) | + S_00B228_FLOAT_MODE(shader->config.float_mode) | + S_00B228_DX10_CLAMP(1) | + S_00B228_MEM_ORDERED(1) | + S_00B228_WGP_MODE(1) | + S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt)); + si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS, + S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0) | + S_00B22C_USER_SGPR(num_user_sgprs) | + S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) | + S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5) | + S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) | + S_00B22C_LDS_SIZE(shader->config.lds_size)); + + nparams = MAX2(shader->info.nr_param_exports, 1); + shader->ctx_reg.ngg.spi_vs_out_config = + S_0286C4_VS_EXPORT_COUNT(nparams - 1) | + S_0286C4_NO_PC_EXPORT(shader->info.nr_param_exports == 0); + + shader->ctx_reg.ngg.spi_shader_idx_format = + S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP); + shader->ctx_reg.ngg.spi_shader_pos_format = + S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) | + S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ? + V_02870C_SPI_SHADER_4COMP : + V_02870C_SPI_SHADER_NONE) | + S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ? + V_02870C_SPI_SHADER_4COMP : + V_02870C_SPI_SHADER_NONE) | + S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ? 
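		/* Editor's sketch: POSn gets a real export format only when
		 * the shader produces more than n position exports, i.e.
		 * roughly:
		 *
		 *   for (i = 1; i < 4; i++)
		 *           fmt |= (nr_pos_exports > i ? V_02870C_SPI_SHADER_4COMP
		 *                                      : V_02870C_SPI_SHADER_NONE) << (4 * i);
		 *
		 * (the 4-bit field shift is illustrative; the S_02870C_POSn_*
		 * macros encode the real positions)
		 */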
+ V_02870C_SPI_SHADER_4COMP : + V_02870C_SPI_SHADER_NONE); + + shader->ctx_reg.ngg.vgt_primitiveid_en = + S_028A84_PRIMITIVEID_EN(es_enable_prim_id) | + S_028A84_NGG_DISABLE_PROVOK_REUSE(shader->key.mono.u.vs_export_prim_id || + gs_sel->info.writes_primid); + + if (gs_type == PIPE_SHADER_GEOMETRY) { + shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = es_sel->esgs_itemsize / 4; + shader->ctx_reg.ngg.vgt_gs_max_vert_out = gs_sel->gs_max_out_vertices; + } else { + shader->ctx_reg.ngg.vgt_esgs_ring_itemsize = 1; + } + + if (es_type == PIPE_SHADER_TESS_EVAL) + si_set_tesseval_regs(sscreen, es_sel, pm4); + + shader->ctx_reg.ngg.vgt_gs_onchip_cntl = + S_028A44_ES_VERTS_PER_SUBGRP(shader->ngg.hw_max_esverts) | + S_028A44_GS_PRIMS_PER_SUBGRP(shader->ngg.max_gsprims) | + S_028A44_GS_INST_PRIMS_IN_SUBGRP(shader->ngg.max_gsprims * gs_num_invocations); + shader->ctx_reg.ngg.ge_max_output_per_subgroup = + S_0287FC_MAX_VERTS_PER_SUBGROUP(shader->ngg.max_out_verts); + shader->ctx_reg.ngg.ge_ngg_subgrp_cntl = + S_028B4C_PRIM_AMP_FACTOR(shader->ngg.prim_amp_factor) | + S_028B4C_THDS_PER_SUBGRP(0); /* for fast launch */ + shader->ctx_reg.ngg.vgt_gs_instance_cnt = + S_028B90_CNT(gs_num_invocations) | + S_028B90_ENABLE(gs_num_invocations > 1) | + S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE( + shader->ngg.max_vert_out_per_gs_instance); + + /* Always output hw-generated edge flags and pass them via the prim + * export to prevent drawing lines on internal edges of decomposed + * primitives (such as quads) with polygon mode = lines. Only VS needs + * this. + */ + shader->ctx_reg.ngg.pa_cl_ngg_cntl = + S_028838_INDEX_BUF_EDGE_FLAG_ENA(gs_type == PIPE_SHADER_VERTEX); + shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(gs_sel, true); + + shader->ge_cntl = + S_03096C_PRIM_GRP_SIZE(shader->ngg.max_gsprims) | + S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */ + S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi); + + /* Bug workaround for a possible hang with non-tessellation cases. + * Tessellation always sets GE_CNTL.VERT_GRP_SIZE = 0 + * + * Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5 + */ + if ((sscreen->info.family == CHIP_NAVI10 || + sscreen->info.family == CHIP_NAVI12 || + sscreen->info.family == CHIP_NAVI14) && + (es_type == PIPE_SHADER_VERTEX || gs_type == PIPE_SHADER_VERTEX) && /* = no tess */ + shader->ngg.hw_max_esverts != 256) { + shader->ge_cntl &= C_03096C_VERT_GRP_SIZE; + + if (shader->ngg.hw_max_esverts > 5) { + shader->ge_cntl |= + S_03096C_VERT_GRP_SIZE(shader->ngg.hw_max_esverts - 5); + } + } + + if (window_space) { + shader->ctx_reg.ngg.pa_cl_vte_cntl = + S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1); + } else { + shader->ctx_reg.ngg.pa_cl_vte_cntl = + S_028818_VTX_W0_FMT(1) | + S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) | + S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) | + S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1); + } +} + static void si_emit_shader_vs(struct si_context *sctx) { struct si_shader *shader = sctx->queued.named.vs->shader; @@ -1001,6 +1312,23 @@ static void si_emit_shader_vs(struct si_context *sctx) SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL, shader->vgt_vertex_reuse_block_cntl); + /* Required programming for tessellation. 
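	 *
	 * Editor's note: per the comment's own wording this programming is
	 * required; the fixed values written below are
	 * ES_VERTS_PER_SUBGRP 250, GS_PRIMS_PER_SUBGRP 126 and
	 * GS_INST_PRIMS_IN_SUBGRP 126, applied when TES runs as a hardware
	 * VS without NGG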
(legacy pipeline only) */ + if (sctx->chip_class == GFX10 && + shader->selector->type == PIPE_SHADER_TESS_EVAL) { + radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL, + SI_TRACKED_VGT_GS_ONCHIP_CNTL, + S_028A44_ES_VERTS_PER_SUBGRP(250) | + S_028A44_GS_PRIMS_PER_SUBGRP(126) | + S_028A44_GS_INST_PRIMS_IN_SUBGRP(126)); + } + + if (sctx->chip_class >= GFX10) { + radeon_opt_set_context_reg_rmw(sctx, R_02881C_PA_CL_VS_OUT_CNTL, + SI_TRACKED_PA_CL_VS_OUT_CNTL__VS, + shader->pa_cl_vs_out_cntl, + SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK); + } + if (initial_cdw != sctx->gfx_cs->current.cdw) sctx->context_roll = true; } @@ -1065,15 +1393,11 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader, vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */ num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR; } else if (shader->selector->type == PIPE_SHADER_VERTEX) { - /* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID) - * If PrimID is disabled. InstanceID / StepRate1 is loaded instead. - * StepRate0 is set to 1. so that VGPR3 doesn't have to be loaded. - */ - vgpr_comp_cnt = enable_prim_id ? 2 : (shader->info.uses_instanceid ? 1 : 0); + vgpr_comp_cnt = si_get_vs_vgpr_comp_cnt(sscreen, shader, enable_prim_id); - if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) { + if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) { num_user_sgprs = SI_SGPR_VS_BLIT_DATA + - info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]; + info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]; } else { num_user_sgprs = si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR); } @@ -1087,6 +1411,11 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader, nparams = MAX2(shader->info.nr_param_exports, 1); shader->ctx_reg.vs.spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1); + if (sscreen->info.chip_class >= GFX10) { + shader->ctx_reg.vs.spi_vs_out_config |= + S_0286C4_NO_PC_EXPORT(shader->info.nr_param_exports == 0); + } + shader->ctx_reg.vs.spi_shader_pos_format = S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) | S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ? @@ -1098,26 +1427,36 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader, S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ? V_02870C_SPI_SHADER_4COMP : V_02870C_SPI_SHADER_NONE); + shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(shader->selector, false); oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0; si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8); si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, S_00B124_MEM_BASE(va >> 40)); - si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS, - S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) | - S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8) | - S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) | - S_00B128_DX10_CLAMP(1) | - S_00B128_FLOAT_MODE(shader->config.float_mode)); - si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS, - S_00B12C_USER_SGPR(num_user_sgprs) | - S_00B12C_OC_LDS_EN(oc_lds_en) | - S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) | - S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) | - S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) | - S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) | - S_00B12C_SO_EN(!!shader->selector->so.num_outputs) | - S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0)); + + uint32_t rsrc1 = S_00B128_VGPRS((shader->config.num_vgprs - 1) / + (sscreen->ge_wave_size == 32 ? 
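	/* Editor's note: the VGPRS field counts allocation granules, and the
	 * granule doubles in wave32 mode; a hypothetical helper capturing
	 * the rule used for every stage in this file:
	 *
	 *   static unsigned si_encode_vgprs(unsigned num_vgprs, unsigned wave_size)
	 *   {
	 *           return (num_vgprs - 1) / (wave_size == 32 ? 8 : 4);
	 *   }
	 */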
8 : 4)) | + S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) | + S_00B128_DX10_CLAMP(1) | + S_00B128_MEM_ORDERED(sscreen->info.chip_class >= GFX10) | + S_00B128_FLOAT_MODE(shader->config.float_mode); + uint32_t rsrc2 = S_00B12C_USER_SGPR(num_user_sgprs) | + S_00B12C_OC_LDS_EN(oc_lds_en) | + S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0); + + if (sscreen->info.chip_class <= GFX9) + rsrc1 |= S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8); + + if (!sscreen->use_ngg_streamout) { + rsrc2 |= S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) | + S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) | + S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) | + S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) | + S_00B12C_SO_EN(!!shader->selector->so.num_outputs); + } + + si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS, rsrc1); + si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS, rsrc2); if (window_space) shader->ctx_reg.vs.pa_cl_vte_cntl = @@ -1197,7 +1536,7 @@ static void si_emit_shader_ps(struct si_context *sctx) sctx->context_roll = true; } -static void si_shader_ps(struct si_shader *shader) +static void si_shader_ps(struct si_screen *sscreen, struct si_shader *shader) { struct tgsi_shader_info *info = &shader->selector->info; struct si_pm4_state *pm4; @@ -1293,8 +1632,15 @@ static void si_shader_ps(struct si_shader *shader) * stalls without this setting. * * Don't add this to CB_SHADER_MASK. + * + * GFX10 supports pixel shaders without exports by setting both + * the color and Z formats to SPI_SHADER_ZERO. The hw will skip export + * instructions if any are present. */ - if (!spi_shader_col_format && + if ((sscreen->info.chip_class <= GFX9 || + info->uses_kill || + shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS) && + !spi_shader_col_format && !info->writes_z && !info->writes_stencil && !info->writes_samplemask) spi_shader_col_format = V_028714_SPI_SHADER_32_R; @@ -1302,7 +1648,8 @@ static void si_shader_ps(struct si_shader *shader) shader->ctx_reg.ps.spi_ps_input_addr = shader->config.spi_ps_input_addr; /* Set interpolation controls. */ - spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader)); + spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader)) | + S_0286D8_PS_W32_EN(sscreen->ps_wave_size == 32); shader->ctx_reg.ps.spi_baryc_cntl = spi_baryc_cntl; shader->ctx_reg.ps.spi_ps_in_control = spi_ps_in_control; @@ -1318,11 +1665,18 @@ static void si_shader_ps(struct si_shader *shader) si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8); si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, S_00B024_MEM_BASE(va >> 40)); - si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS, - S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) | - S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8) | - S_00B028_DX10_CLAMP(1) | - S_00B028_FLOAT_MODE(shader->config.float_mode)); + uint32_t rsrc1 = + S_00B028_VGPRS((shader->config.num_vgprs - 1) / + (sscreen->ps_wave_size == 32 ? 
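	/* Editor's sketch: the dummy-export workaround above reduces to a
	 * predicate like:
	 *
	 *   bool needs_dummy_export =
	 *           !spi_shader_col_format &&
	 *           !info->writes_z && !info->writes_stencil &&
	 *           !info->writes_samplemask &&
	 *           (sscreen->info.chip_class <= GFX9 ||
	 *            info->uses_kill ||
	 *            shader->key.part.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS);
	 *
	 * On GFX10 an export-less PS is otherwise legal: both the color and
	 * Z formats stay SPI_SHADER_ZERO and the hw skips export
	 * instructions.
	 */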
8 : 4)) | + S_00B028_DX10_CLAMP(1) | + S_00B028_MEM_ORDERED(sscreen->info.chip_class >= GFX10) | + S_00B028_FLOAT_MODE(shader->config.float_mode); + + if (sscreen->info.chip_class < GFX10) { + rsrc1 |= S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8); + } + + si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS, rsrc1); si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS, S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) | S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) | @@ -1338,6 +1692,8 @@ static void si_shader_init_pm4_state(struct si_screen *sscreen, si_shader_ls(sscreen, shader); else if (shader->key.as_es) si_shader_es(sscreen, shader); + else if (shader->key.as_ngg) + gfx10_shader_ngg(sscreen, shader); else si_shader_vs(sscreen, shader, NULL); break; @@ -1347,14 +1703,19 @@ static void si_shader_init_pm4_state(struct si_screen *sscreen, case PIPE_SHADER_TESS_EVAL: if (shader->key.as_es) si_shader_es(sscreen, shader); + else if (shader->key.as_ngg) + gfx10_shader_ngg(sscreen, shader); else si_shader_vs(sscreen, shader, NULL); break; case PIPE_SHADER_GEOMETRY: - si_shader_gs(sscreen, shader); + if (shader->key.as_ngg) + gfx10_shader_ngg(sscreen, shader); + else + si_shader_gs(sscreen, shader); break; case PIPE_SHADER_FRAGMENT: - si_shader_ps(shader); + si_shader_ps(sscreen, shader); break; default: assert(0); @@ -1364,10 +1725,7 @@ static void si_shader_init_pm4_state(struct si_screen *sscreen, static unsigned si_get_alpha_test_func(struct si_context *sctx) { /* Alpha-test should be disabled if colorbuffer 0 is integer. */ - if (sctx->queued.named.dsa) - return sctx->queued.named.dsa->alpha_func; - - return PIPE_FUNC_ALWAYS; + return sctx->queued.named.dsa->alpha_func; } void si_shader_selector_key_vs(struct si_context *sctx, @@ -1376,7 +1734,7 @@ void si_shader_selector_key_vs(struct si_context *sctx, struct si_vs_prolog_bits *prolog_key) { if (!sctx->vertex_elements || - vs->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) + vs->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) return; struct si_vertex_elements *elts = sctx->vertex_elements; @@ -1434,13 +1792,11 @@ static void si_shader_selector_key_hw_vs(struct si_context *sctx, /* Find out if PS is disabled. 
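	 *
	 * Editor's note: "disabled" means the PS cannot affect the
	 * framebuffer, roughly:
	 *
	 *   ps_disabled = rasterizer_discard ||
	 *                 (ps_colormask == 0 && !ps_modifies_zs);
	 *
	 * in which case the VS variant may drop exports that only the PS
	 * would have consumed.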
*/ bool ps_disabled = true; if (ps) { - const struct si_state_blend *blend = sctx->queued.named.blend; - bool alpha_to_coverage = blend && blend->alpha_to_coverage; bool ps_modifies_zs = ps->info.uses_kill || ps->info.writes_z || ps->info.writes_stencil || ps->info.writes_samplemask || - alpha_to_coverage || + sctx->queued.named.blend->alpha_to_coverage || si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS; unsigned ps_colormask = si_get_total_colormask(sctx); @@ -1471,6 +1827,7 @@ static void si_shader_selector_key_hw_vs(struct si_context *sctx, /* Compute the key for the hw shader variant */ static inline void si_shader_selector_key(struct pipe_context *ctx, struct si_shader_selector *sel, + union si_vgt_stages_key stages_key, struct si_shader_key *key) { struct si_context *sctx = (struct si_context *)ctx; @@ -1483,9 +1840,11 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, if (sctx->tes_shader.cso) key->as_ls = 1; - else if (sctx->gs_shader.cso) + else if (sctx->gs_shader.cso) { key->as_es = 1; - else { + key->as_ngg = stages_key.u.ngg; + } else { + key->as_ngg = stages_key.u.ngg; si_shader_selector_key_hw_vs(sctx, sel, key); if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid) @@ -1524,6 +1883,8 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, key->mono.u.ff_tcs_inputs_to_copy = sctx->vs_shader.cso->outputs_written; break; case PIPE_SHADER_TESS_EVAL: + key->as_ngg = stages_key.u.ngg; + if (sctx->gs_shader.cso) key->as_es = 1; else { @@ -1544,6 +1905,8 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, key->part.gs.prolog.gfx9_prev_is_vs = 1; } + key->as_ngg = stages_key.u.ngg; + /* Merged ES-GS can have unbalanced wave usage. * * ES threads are per-vertex, while GS threads are @@ -1573,35 +1936,33 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, sel->info.colors_written == 0x1) key->part.ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1; - if (blend) { - /* Select the shader color format based on whether - * blending or alpha are needed. - */ - key->part.ps.epilog.spi_shader_col_format = - (blend->blend_enable_4bit & blend->need_src_alpha_4bit & - sctx->framebuffer.spi_shader_col_format_blend_alpha) | - (blend->blend_enable_4bit & ~blend->need_src_alpha_4bit & - sctx->framebuffer.spi_shader_col_format_blend) | - (~blend->blend_enable_4bit & blend->need_src_alpha_4bit & - sctx->framebuffer.spi_shader_col_format_alpha) | - (~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit & - sctx->framebuffer.spi_shader_col_format); - key->part.ps.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit; - - /* The output for dual source blending should have - * the same format as the first output. - */ - if (blend->dual_src_blend) - key->part.ps.epilog.spi_shader_col_format |= - (key->part.ps.epilog.spi_shader_col_format & 0xf) << 4; - } else - key->part.ps.epilog.spi_shader_col_format = sctx->framebuffer.spi_shader_col_format; + /* Select the shader color format based on whether + * blending or alpha are needed. 
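		 *
		 * Editor's note: every *_4bit mask carries one nibble per
		 * color buffer, so the merge below picks, per CB, one of
		 * spi_shader_col_format_{blend_alpha,blend,alpha} or the
		 * plain format according to (blend_enable, need_src_alpha),
		 * then masks off disabled targets with cb_target_enabled_4bit.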
+ */ + key->part.ps.epilog.spi_shader_col_format = + (blend->blend_enable_4bit & blend->need_src_alpha_4bit & + sctx->framebuffer.spi_shader_col_format_blend_alpha) | + (blend->blend_enable_4bit & ~blend->need_src_alpha_4bit & + sctx->framebuffer.spi_shader_col_format_blend) | + (~blend->blend_enable_4bit & blend->need_src_alpha_4bit & + sctx->framebuffer.spi_shader_col_format_alpha) | + (~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit & + sctx->framebuffer.spi_shader_col_format); + key->part.ps.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit; + + /* The output for dual source blending should have + * the same format as the first output. + */ + if (blend->dual_src_blend) { + key->part.ps.epilog.spi_shader_col_format |= + (key->part.ps.epilog.spi_shader_col_format & 0xf) << 4; + } /* If alpha-to-coverage is enabled, we have to export alpha * even if there is no color buffer. */ if (!(key->part.ps.epilog.spi_shader_col_format & 0xf) && - blend && blend->alpha_to_coverage) + blend->alpha_to_coverage) key->part.ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR; /* On GFX6 and GFX7 except Hawaii, the CB doesn't clamp outputs @@ -1626,10 +1987,8 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, key->part.ps.prolog.color_two_side = rs->two_side && sel->info.colors_read; key->part.ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read; - if (sctx->queued.named.blend) { - key->part.ps.epilog.alpha_to_one = sctx->queued.named.blend->alpha_to_one && - rs->multisample_enable; - } + key->part.ps.epilog.alpha_to_one = blend->alpha_to_one && + rs->multisample_enable; key->part.ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly; key->part.ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) || @@ -1675,7 +2034,8 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, sel->info.uses_linear_centroid + sel->info.uses_linear_sample > 1; - if (sel->info.opcode_count[TGSI_OPCODE_INTERP_SAMPLE]) + if (sel->info.uses_persp_opcode_interp_sample || + sel->info.uses_linear_opcode_interp_sample) key->mono.u.ps.interpolate_at_sample_force_center = 1; } @@ -1715,7 +2075,6 @@ static void si_build_shader_variant(struct si_shader *shader, struct si_screen *sscreen = sel->screen; struct ac_llvm_compiler *compiler; struct pipe_debug_callback *debug = &shader->compiler_ctx_state.debug; - int r; if (thread_index >= 0) { if (low_priority) { @@ -1732,10 +2091,12 @@ static void si_build_shader_variant(struct si_shader *shader, compiler = shader->compiler_ctx_state.compiler; } - r = si_shader_create(sscreen, compiler, shader, debug); - if (unlikely(r)) { - PRINT_ERR("Failed to build shader variant (type=%u) %d\n", - sel->type, r); + if (!compiler->passes) + si_init_compiler(sscreen, compiler); + + if (unlikely(!si_shader_create(sscreen, compiler, shader, debug))) { + PRINT_ERR("Failed to build shader variant (type=%u)\n", + sel->type); shader->compilation_failed = true; return; } @@ -1744,7 +2105,7 @@ static void si_build_shader_variant(struct si_shader *shader, FILE *f = open_memstream(&shader->shader_log, &shader->shader_log_size); if (f) { - si_shader_dump(sscreen, shader, NULL, sel->type, f, false); + si_shader_dump(sscreen, shader, NULL, f, false); fclose(f); } } @@ -1784,9 +2145,10 @@ static bool si_check_missing_main_part(struct si_screen *sscreen, main_part->selector = sel; main_part->key.as_es = key->as_es; main_part->key.as_ls = key->as_ls; + main_part->key.as_ngg = key->as_ngg; main_part->is_monolithic = false; - if 
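	/* Editor's sketch: main parts are cached per key-flag combination;
	 * si_get_main_shader_part() resolves to roughly the following
	 * (hypothetical simplification, field names may differ):
	 *
	 *   if (key->as_ls) return &sel->main_shader_part_ls;
	 *   if (key->as_es) return key->as_ngg ? &sel->main_shader_part_ngg_es
	 *                                      : &sel->main_shader_part_es;
	 *   return key->as_ngg ? &sel->main_shader_part_ngg
	 *                      : &sel->main_shader_part;
	 */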
(si_compile_tgsi_shader(sscreen, compiler_state->compiler, + if (si_compile_shader(sscreen, compiler_state->compiler, main_part, &compiler_state->debug) != 0) { FREE(main_part); return false; @@ -1848,14 +2210,14 @@ current_not_ready: if (thread_index < 0) util_queue_fence_wait(&sel->ready); - mtx_lock(&sel->mutex); + simple_mtx_lock(&sel->mutex); /* Find the shader variant. */ for (iter = sel->first_variant; iter; iter = iter->next_variant) { /* Don't check the "current" shader. We checked it above. */ if (current != iter && memcmp(&iter->key, key, sizeof(*key)) == 0) { - mtx_unlock(&sel->mutex); + simple_mtx_unlock(&sel->mutex); if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) { /* If it's an optimized shader and its compilation has @@ -1884,7 +2246,7 @@ current_not_ready: /* Build a new shader. */ shader = CALLOC_STRUCT(si_shader); if (!shader) { - mtx_unlock(&sel->mutex); + simple_mtx_unlock(&sel->mutex); return -ENOMEM; } @@ -1917,37 +2279,45 @@ current_not_ready: */ if (!is_pure_monolithic && !key->opt.vs_as_prim_discard_cs) { - bool ok; + bool ok = true; /* Make sure the main shader part is present. This is needed * for shaders that can be compiled as VS, LS, or ES, and only * one of them is compiled at creation. * + * It is also needed for GS, which can be compiled as non-NGG + * and NGG. + * * For merged shaders, check that the starting shader's main * part is present. */ if (previous_stage_sel) { struct si_shader_key shader1_key = zeroed; - if (sel->type == PIPE_SHADER_TESS_CTRL) + if (sel->type == PIPE_SHADER_TESS_CTRL) { shader1_key.as_ls = 1; - else if (sel->type == PIPE_SHADER_GEOMETRY) + } else if (sel->type == PIPE_SHADER_GEOMETRY) { shader1_key.as_es = 1; - else + shader1_key.as_ngg = key->as_ngg; /* for Wave32 vs Wave64 */ + } else { assert(0); + } - mtx_lock(&previous_stage_sel->mutex); + simple_mtx_lock(&previous_stage_sel->mutex); ok = si_check_missing_main_part(sscreen, previous_stage_sel, compiler_state, &shader1_key); - mtx_unlock(&previous_stage_sel->mutex); - } else { + simple_mtx_unlock(&previous_stage_sel->mutex); + } + + if (ok) { ok = si_check_missing_main_part(sscreen, sel, compiler_state, key); } + if (!ok) { FREE(shader); - mtx_unlock(&sel->mutex); + simple_mtx_unlock(&sel->mutex); return -ENOMEM; /* skip the draw call */ } } @@ -1977,7 +2347,8 @@ current_not_ready: /* Compile it asynchronously. */ util_queue_add_job(&sscreen->shader_compiler_queue_low_priority, shader, &shader->ready, - si_build_shader_variant_low_priority, NULL); + si_build_shader_variant_low_priority, NULL, + 0); /* Add only after the ready fence was reset, to guard against a * race with si_bind_XX_shader. */ @@ -1991,7 +2362,7 @@ current_not_ready: /* Use the default (unoptimized) shader for now. 
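		 *
		 * Editor's note: the optimized variant compiles asynchronously
		 * on the low-priority queue (util_queue_add_job above); this
		 * draw call proceeds with the plain key (key->opt zeroed), and
		 * a later si_shader_select() picks up the optimized variant
		 * once its ready fence signals.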
*/ memset(&key->opt, 0, sizeof(key->opt)); - mtx_unlock(&sel->mutex); + simple_mtx_unlock(&sel->mutex); if (sscreen->options.sync_compile) util_queue_fence_wait(&shader->ready); @@ -2012,7 +2383,7 @@ current_not_ready: sel->last_variant = shader; } - mtx_unlock(&sel->mutex); + simple_mtx_unlock(&sel->mutex); assert(!shader->is_optimized); si_build_shader_variant(shader, thread_index, false); @@ -2027,12 +2398,13 @@ current_not_ready: static int si_shader_select(struct pipe_context *ctx, struct si_shader_ctx_state *state, + union si_vgt_stages_key stages_key, struct si_compiler_ctx_state *compiler_state) { struct si_context *sctx = (struct si_context *)ctx; struct si_shader_key key; - si_shader_selector_key(ctx, state->cso, &key); + si_shader_selector_key(ctx, state->cso, stages_key, &key); return si_shader_select_with_key(sctx->screen, state, compiler_state, &key, -1, false); } @@ -2089,8 +2461,25 @@ static void si_init_shader_selector_async(void *job, int thread_index) assert(thread_index < ARRAY_SIZE(sscreen->compiler)); compiler = &sscreen->compiler[thread_index]; - if (sel->nir) - si_lower_nir(sel); + if (!compiler->passes) + si_init_compiler(sscreen, compiler); + + /* Serialize NIR to save memory. Monolithic shader variants + * have to deserialize NIR before compilation. + */ + if (sel->nir) { + struct blob blob; + size_t size; + + blob_init(&blob); + /* true = remove optional debugging data to increase + * the likehood of getting more shader cache hits. + * It also drops variable names, so we'll save more memory. + */ + nir_serialize(&blob, sel->nir, true); + blob_finish_get_buffer(&blob, &sel->nir_binary, &size); + sel->nir_size = size; + } /* Compile the main shader part for use with a prolog and/or epilog. * If this fails, the driver will try to compile a monolithic shader @@ -2098,7 +2487,7 @@ static void si_init_shader_selector_async(void *job, int thread_index) */ if (!sscreen->use_monolithic_shaders) { struct si_shader *shader = CALLOC_STRUCT(si_shader); - void *ir_binary = NULL; + unsigned char ir_sha1_cache_key[20]; if (!shader) { fprintf(stderr, "radeonsi: can't allocate a main shader part\n"); @@ -2115,34 +2504,39 @@ static void si_init_shader_selector_async(void *job, int thread_index) sel->so.num_outputs != 0, &shader->key); - if (sel->tokens || sel->nir) - ir_binary = si_get_ir_binary(sel); + if (sscreen->use_ngg && + (!sel->so.num_outputs || sscreen->use_ngg_streamout) && + ((sel->type == PIPE_SHADER_VERTEX && !shader->key.as_ls) || + sel->type == PIPE_SHADER_TESS_EVAL || + sel->type == PIPE_SHADER_GEOMETRY)) + shader->key.as_ngg = 1; + + if (sel->nir) { + si_get_ir_cache_key(sel, shader->key.as_ngg, + shader->key.as_es, ir_sha1_cache_key); + } /* Try to load the shader from the shader cache. */ - mtx_lock(&sscreen->shader_cache_mutex); + simple_mtx_lock(&sscreen->shader_cache_mutex); - if (ir_binary && - si_shader_cache_load_shader(sscreen, ir_binary, shader)) { - mtx_unlock(&sscreen->shader_cache_mutex); - si_shader_dump_stats_for_shader_db(shader, debug); + if (si_shader_cache_load_shader(sscreen, ir_sha1_cache_key, shader)) { + simple_mtx_unlock(&sscreen->shader_cache_mutex); + si_shader_dump_stats_for_shader_db(sscreen, shader, debug); } else { - mtx_unlock(&sscreen->shader_cache_mutex); + simple_mtx_unlock(&sscreen->shader_cache_mutex); /* Compile the shader if it hasn't been loaded from the cache. 
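		 *
		 * Editor's sketch of the cache protocol used here:
		 *
		 *   simple_mtx_lock(&sscreen->shader_cache_mutex);
		 *   hit = si_shader_cache_load_shader(sscreen, sha1, shader);
		 *   simple_mtx_unlock(&sscreen->shader_cache_mutex);
		 *   if (!hit) {
		 *           compile(shader);                     // slow path
		 *           simple_mtx_lock(...);
		 *           si_shader_cache_insert_shader(...);  // memory + disk
		 *           simple_mtx_unlock(...);
		 *   }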
*/ - if (si_compile_tgsi_shader(sscreen, compiler, shader, + if (si_compile_shader(sscreen, compiler, shader, debug) != 0) { FREE(shader); - FREE(ir_binary); fprintf(stderr, "radeonsi: can't compile a main shader part\n"); return; } - if (ir_binary) { - mtx_lock(&sscreen->shader_cache_mutex); - if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true)) - FREE(ir_binary); - mtx_unlock(&sscreen->shader_cache_mutex); - } + simple_mtx_lock(&sscreen->shader_cache_mutex); + si_shader_cache_insert_shader(sscreen, ir_sha1_cache_key, + shader, true); + simple_mtx_unlock(&sscreen->shader_cache_mutex); } *si_get_main_shader_part(sel, &shader->key) = shader; @@ -2191,7 +2585,10 @@ static void si_init_shader_selector_async(void *job, int thread_index) } /* The GS copy shader is always pre-compiled. */ - if (sel->type == PIPE_SHADER_GEOMETRY) { + if (sel->type == PIPE_SHADER_GEOMETRY && + (!sscreen->use_ngg || + !sscreen->use_ngg_streamout || /* also for PRIMITIVES_GENERATED */ + sel->tess_turns_off_ngg)) { sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, compiler, sel, debug); if (!sel->gs_copy_shader) { fprintf(stderr, "radeonsi: can't create GS copy shader\n"); @@ -2200,6 +2597,12 @@ static void si_init_shader_selector_async(void *job, int thread_index) si_shader_vs(sscreen, sel->gs_copy_shader, sel); } + + /* Free NIR. We only keep serialized NIR after this point. */ + if (sel->nir) { + ralloc_free(sel->nir); + sel->nir = NULL; + } } void si_schedule_initial_compile(struct si_context *sctx, unsigned processor, @@ -2221,7 +2624,7 @@ void si_schedule_initial_compile(struct si_context *sctx, unsigned processor, } util_queue_add_job(&sctx->screen->shader_compiler_queue, job, - ready_fence, execute, NULL); + ready_fence, execute, NULL, 0); if (debug) { util_queue_fence_wait(ready_fence); @@ -2238,12 +2641,13 @@ void si_get_active_slot_masks(const struct tgsi_shader_info *info, uint32_t *const_and_shader_buffers, uint64_t *samplers_and_images) { - unsigned start, num_shaderbufs, num_constbufs, num_images, num_samplers; + unsigned start, num_shaderbufs, num_constbufs, num_images, num_msaa_images, num_samplers; num_shaderbufs = util_last_bit(info->shader_buffers_declared); num_constbufs = util_last_bit(info->const_buffers_declared); /* two 8-byte images share one 16-byte slot */ num_images = align(util_last_bit(info->images_declared), 2); + num_msaa_images = align(util_last_bit(info->msaa_images_declared), 2); num_samplers = util_last_bit(info->samplers_declared); /* The layout is: sb[last] ... sb[0], cb[0] ... cb[last] */ @@ -2251,7 +2655,18 @@ void si_get_active_slot_masks(const struct tgsi_shader_info *info, *const_and_shader_buffers = u_bit_consecutive(start, num_shaderbufs + num_constbufs); - /* The layout is: image[last] ... image[0], sampler[0] ... sampler[last] */ + /* The layout is: + * - fmask[last] ... fmask[0] go to [15-last .. 15] + * - image[last] ... image[0] go to [31-last .. 31] + * - sampler[0] ... sampler[last] go to [32 .. 32+last*2] + * + * FMASKs for images are placed separately, because MSAA images are rare, + * and so we can benefit from a better cache hit rate if we keep image + * descriptors together. 
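u_bit_consecutive(start, n), which the slot-mask code here leans on, is simply n consecutive set bits starting at bit "start". A self-contained equivalent with the n == 32 edge case handled, plus the call shape used for the buffer slots:

    #include <stdint.h>

    static uint32_t bit_consecutive(unsigned start, unsigned n)
    {
       uint32_t mask = n >= 32 ? ~0u : (1u << n) - 1;   /* avoid shifting by 32 */
       return mask << start;
    }

    /* Shader buffers count down to "start", constant buffers count up from it,
     * so one call covers both ranges (32 stands in for the slot count):
     *    bit_consecutive(32 - num_shaderbufs, num_shaderbufs + num_constbufs)
     */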
+ */ + if (num_msaa_images) + num_images = SI_NUM_IMAGES + num_msaa_images; /* add FMASK descriptors */ + start = si_get_image_slot(num_images - 1) / 2; *samplers_and_images = u_bit_consecutive64(start, num_images / 2 + num_samplers); @@ -2276,24 +2691,16 @@ static void *si_create_shader_selector(struct pipe_context *ctx, sel->so = state->stream_output; if (state->type == PIPE_SHADER_IR_TGSI) { - sel->tokens = tgsi_dup_tokens(state->tokens); - if (!sel->tokens) { - FREE(sel); - return NULL; - } - - tgsi_scan_shader(state->tokens, &sel->info); - tgsi_scan_tess_ctrl(state->tokens, &sel->info, &sel->tcs_info); + sel->nir = tgsi_to_nir(state->tokens, ctx->screen); } else { assert(state->type == PIPE_SHADER_IR_NIR); - sel->nir = state->ir.nir; - - si_nir_opts(sel->nir); - si_nir_scan_shader(sel->nir, &sel->info); - si_nir_scan_tess_ctrl(sel->nir, &sel->tcs_info); } + si_nir_scan_shader(sel->nir, &sel->info); + si_nir_scan_tess_ctrl(sel->nir, &sel->tcs_info); + si_nir_adjust_driver_locations(sel->nir); + sel->type = sel->info.processor; p_atomic_inc(&sscreen->num_shaders_created); si_get_active_slot_masks(&sel->info, @@ -2307,10 +2714,14 @@ static void *si_create_shader_selector(struct pipe_context *ctx, (sel->so.output[i].stream * 4); } + sel->num_vs_inputs = sel->type == PIPE_SHADER_VERTEX && + !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] ? + sel->info.num_inputs : 0; + /* The prolog is a no-op if there are no inputs. */ sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX && sel->info.num_inputs && - !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS]; + !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]; sel->force_correct_derivs_after_kill = sel->type == PIPE_SHADER_FRAGMENT && @@ -2327,25 +2738,16 @@ static void *si_create_shader_selector(struct pipe_context *ctx, !sel->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] && !sel->so.num_outputs; - /* Set which opcode uses which (i,j) pair. */ - if (sel->info.uses_persp_opcode_interp_centroid) - sel->info.uses_persp_centroid = true; - - if (sel->info.uses_linear_opcode_interp_centroid) - sel->info.uses_linear_centroid = true; - - if (sel->info.uses_persp_opcode_interp_offset || - sel->info.uses_persp_opcode_interp_sample) - sel->info.uses_persp_center = true; - - if (sel->info.uses_linear_opcode_interp_offset || - sel->info.uses_linear_opcode_interp_sample) - sel->info.uses_linear_center = true; - switch (sel->type) { case PIPE_SHADER_GEOMETRY: sel->gs_output_prim = sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM]; + + /* Only possibilities: POINTS, LINE_STRIP, TRIANGLES */ + sel->rast_prim = sel->gs_output_prim; + if (util_rast_prim_is_triangles(sel->rast_prim)) + sel->rast_prim = PIPE_PRIM_TRIANGLES; + sel->gs_max_out_vertices = sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES]; sel->gs_num_invocations = @@ -2361,6 +2763,11 @@ static void *si_create_shader_selector(struct pipe_context *ctx, sel->gs_input_verts_per_prim = u_vertices_per_prim(sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]); + + /* EN_MAX_VERT_OUT_PER_GS_INSTANCE does not work with tessellation.
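The new rast_prim field reduces the GS output primitive to one of three rasterizer classes so later draw-state code can compare a single enum. A sketch of the reduction with illustrative PIPE_PRIM-style constants; the driver performs the triangle test with util_rast_prim_is_triangles:

    enum prim { PRIM_POINTS, PRIM_LINE_STRIP, PRIM_TRIANGLES, PRIM_TRIANGLE_STRIP };

    static enum prim rast_prim_class(enum prim out_prim)
    {
       switch (out_prim) {
       case PRIM_POINTS:
       case PRIM_LINE_STRIP:
          return out_prim;           /* already one of the three classes */
       default:
          return PRIM_TRIANGLES;     /* all triangle-like outputs collapse here */
       }
    }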
*/ + sel->tess_turns_off_ngg = + sscreen->info.chip_class == GFX10 && + sel->gs_num_invocations * sel->gs_max_out_vertices > 256; break; case PIPE_SHADER_TESS_CTRL: @@ -2414,6 +2821,14 @@ static void *si_create_shader_selector(struct pipe_context *ctx, sel->esgs_itemsize += 4; assert(((sel->esgs_itemsize / 4) & C_028AAC_ITEMSIZE) == 0); + + /* Only for TES: */ + if (sel->info.properties[TGSI_PROPERTY_TES_POINT_MODE]) + sel->rast_prim = PIPE_PRIM_POINTS; + else if (sel->info.properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES) + sel->rast_prim = PIPE_PRIM_LINE_STRIP; + else + sel->rast_prim = PIPE_PRIM_TRIANGLES; break; case PIPE_SHADER_FRAGMENT: @@ -2447,19 +2862,13 @@ static void *si_create_shader_selector(struct pipe_context *ctx, } } break; + default:; } /* PA_CL_VS_OUT_CNTL */ - bool misc_vec_ena = - sel->info.writes_psize || sel->info.writes_edgeflag || - sel->info.writes_layer || sel->info.writes_viewport_index; - sel->pa_cl_vs_out_cntl = - S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) | - S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag) | - S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) | - S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) | - S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) | - S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena); + if (sctx->chip_class <= GFX9) + sel->pa_cl_vs_out_cntl = si_get_vs_out_cntl(sel, false); + sel->clipdist_mask = sel->info.writes_clipvertex ? SIX_BITS : sel->info.clipdist_writemask; sel->culldist_mask = sel->info.culldist_writemask << @@ -2515,7 +2924,10 @@ static void *si_create_shader_selector(struct pipe_context *ctx, sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z); } - (void) mtx_init(&sel->mutex, mtx_plain); + if (sel->info.properties[TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE]) + sel->db_shader_control |= S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(1); + + (void) simple_mtx_init(&sel->mutex, mtx_plain); si_schedule_initial_compile(sctx, sel->info.processor, &sel->ready, &sel->compiler_ctx_state, sel, @@ -2584,7 +2996,10 @@ static void si_bind_vs_shader(struct pipe_context *ctx, void *state) sctx->vs_shader.cso = sel; sctx->vs_shader.current = sel ? sel->first_variant : NULL; - sctx->num_vs_blit_sgprs = sel ? sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS] : 0; + sctx->num_vs_blit_sgprs = sel ? sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD] : 0; + + if (si_update_ngg(sctx)) + si_shader_change_notify(sctx); si_update_common_shader_state(sctx); si_update_vs_viewport_state(sctx); @@ -2607,6 +3022,44 @@ static void si_update_tess_uses_prim_id(struct si_context *sctx) sctx->ps_shader.cso->info.uses_primid); } +bool si_update_ngg(struct si_context *sctx) +{ + if (!sctx->screen->use_ngg) { + assert(!sctx->ngg); + return false; + } + + bool new_ngg = true; + + if (sctx->gs_shader.cso && sctx->tes_shader.cso && + sctx->gs_shader.cso->tess_turns_off_ngg) { + new_ngg = false; + } else if (!sctx->screen->use_ngg_streamout) { + struct si_shader_selector *last = si_get_vs(sctx)->cso; + + if ((last && last->so.num_outputs) || + sctx->streamout.prims_gen_query_enabled) + new_ngg = false; + } + + if (new_ngg != sctx->ngg) { + /* Transitioning from NGG to legacy GS requires VGT_FLUSH on Navi10-14. + * VGT_FLUSH is also emitted at the beginning of IBs when legacy GS ring + * pointers are set. 
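The tess_turns_off_ngg condition deserves a worked number: EN_MAX_VERT_OUT_PER_GS_INSTANCE cannot be combined with tessellation, so on GFX10 a GS whose per-primitive output exceeds 256 vertices must take the legacy path whenever tessellation is active. For instance, 6 invocations times 64 max output vertices gives 384 > 256. The check, restated:

    #include <stdbool.h>

    static bool tess_turns_off_ngg(bool is_gfx10, unsigned gs_num_invocations,
                                   unsigned gs_max_out_vertices)
    {
       return is_gfx10 && gs_num_invocations * gs_max_out_vertices > 256;
    }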
+ */ + if ((sctx->family == CHIP_NAVI10 || + sctx->family == CHIP_NAVI12 || + sctx->family == CHIP_NAVI14) && + !new_ngg) + sctx->flags |= SI_CONTEXT_VGT_FLUSH; + + sctx->ngg = new_ngg; + sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */ + return true; + } + return false; +} + static void si_bind_gs_shader(struct pipe_context *ctx, void *state) { struct si_context *sctx = (struct si_context *)ctx; @@ -2614,6 +3067,7 @@ static void si_bind_gs_shader(struct pipe_context *ctx, void *state) struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx); struct si_shader_selector *sel = state; bool enable_changed = !!sctx->gs_shader.cso != !!sel; + bool ngg_changed; if (sctx->gs_shader.cso == sel) return; @@ -2623,10 +3077,12 @@ static void si_bind_gs_shader(struct pipe_context *ctx, void *state) sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL; si_update_common_shader_state(sctx); - sctx->last_rast_prim = -1; /* reset this so that it gets updated */ + sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */ - if (enable_changed) { + ngg_changed = si_update_ngg(sctx); + if (ngg_changed || enable_changed) si_shader_change_notify(sctx); + if (enable_changed) { if (sctx->ia_multi_vgt_param_key.u.uses_tess) si_update_tess_uses_prim_id(sctx); } @@ -2675,12 +3131,13 @@ static void si_bind_tes_shader(struct pipe_context *ctx, void *state) si_update_tess_uses_prim_id(sctx); si_update_common_shader_state(sctx); - sctx->last_rast_prim = -1; /* reset this so that it gets updated */ + sctx->last_gs_out_prim = -1; /* reset this so that it gets updated */ - if (enable_changed) { + bool ngg_changed = si_update_ngg(sctx); + if (ngg_changed || enable_changed) si_shader_change_notify(sctx); + if (enable_changed) sctx->last_tes_sh_base = -1; /* invalidate derived tess state */ - } si_update_vs_viewport_state(sctx); si_set_active_descriptors_for_shader(sctx, sel); si_update_streamout_state(sctx); @@ -2731,6 +3188,11 @@ static void si_delete_shader(struct si_context *sctx, struct si_shader *shader) util_queue_fence_destroy(&shader->ready); if (shader->pm4) { + /* If destroyed shaders were not unbound, the next compiled + * shader variant could get the same pointer address and so + * binding it to the same shader stage would be considered + * a no-op, causing random behavior. 
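si_update_ngg follows a compute-compare-notify shape: derive the new mode from the current bindings, apply the Navi10-14 workaround only on the NGG-to-legacy edge, and report whether dependent state must be rebuilt. Its control flow, reduced to a sketch with a hypothetical context struct:

    #include <stdbool.h>

    struct ctx { bool ngg; bool needs_vgt_flush; int last_gs_out_prim; };

    static bool update_ngg_mode(struct ctx *c, bool new_ngg, bool is_navi1x)
    {
       if (new_ngg == c->ngg)
          return false;                 /* unchanged, nothing to rebind */

       if (is_navi1x && !new_ngg)
          c->needs_vgt_flush = true;    /* NGG -> legacy GS needs VGT_FLUSH */

       c->ngg = new_ngg;
       c->last_gs_out_prim = -1;        /* force re-emit of derived state */
       return true;
    }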
+ */ switch (shader->selector->type) { case PIPE_SHADER_VERTEX: if (shader->key.as_ls) { @@ -2739,6 +3201,8 @@ static void si_delete_shader(struct si_context *sctx, struct si_shader *shader) } else if (shader->key.as_es) { assert(sctx->chip_class <= GFX8); si_pm4_delete_state(sctx, es, shader->pm4); + } else if (shader->key.as_ngg) { + si_pm4_delete_state(sctx, gs, shader->pm4); } else { si_pm4_delete_state(sctx, vs, shader->pm4); } @@ -2750,6 +3214,8 @@ static void si_delete_shader(struct si_context *sctx, struct si_shader *shader) if (shader->key.as_es) { assert(sctx->chip_class <= GFX8); si_pm4_delete_state(sctx, es, shader->pm4); + } else if (shader->key.as_ngg) { + si_pm4_delete_state(sctx, gs, shader->pm4); } else { si_pm4_delete_state(sctx, vs, shader->pm4); } @@ -2763,6 +3229,7 @@ static void si_delete_shader(struct si_context *sctx, struct si_shader *shader) case PIPE_SHADER_FRAGMENT: si_pm4_delete_state(sctx, ps, shader->pm4); break; + default:; } } @@ -2802,13 +3269,15 @@ void si_destroy_shader_selector(struct si_context *sctx, si_delete_shader(sctx, sel->main_shader_part_ls); if (sel->main_shader_part_es) si_delete_shader(sctx, sel->main_shader_part_es); + if (sel->main_shader_part_ngg) + si_delete_shader(sctx, sel->main_shader_part_ngg); if (sel->gs_copy_shader) si_delete_shader(sctx, sel->gs_copy_shader); util_queue_fence_destroy(&sel->ready); - mtx_destroy(&sel->mutex); - free(sel->tokens); + simple_mtx_destroy(&sel->mutex); ralloc_free(sel->nir); + free(sel->nir_binary); free(sel); } @@ -2828,7 +3297,8 @@ static unsigned si_get_ps_input_cntl(struct si_context *sctx, unsigned j, offset, ps_input_cntl = 0; if (interpolate == TGSI_INTERPOLATE_CONSTANT || - (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade)) + (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade) || + name == TGSI_SEMANTIC_PRIMID) ps_input_cntl |= S_028644_FLAT_SHADE(1); if (name == TGSI_SEMANTIC_PCOORD || @@ -2863,8 +3333,8 @@ static unsigned si_get_ps_input_cntl(struct si_context *sctx, } } - if (name == TGSI_SEMANTIC_PRIMID) - /* PrimID is written after the last output. */ + if (j == vsinfo->num_outputs && name == TGSI_SEMANTIC_PRIMID) + /* PrimID is written after the last output when HW VS is used. */ ps_input_cntl |= S_028644_OFFSET(vs->info.vs_output_param_offset[vsinfo->num_outputs]); else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(ps_input_cntl)) { /* No corresponding output found, load defaults into input. 
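The comment in this hunk describes a pointer-aliasing hazard worth spelling out: bind functions no-op when the incoming pointer equals the currently bound one, so a freed-but-still-bound state can collide with a new allocation at the same address and the rebind gets skipped. Unbinding on delete closes the hole; a sketch with hypothetical helpers:

    #include <stdlib.h>

    static void bind_state(void **bound, void *state)
    {
       if (*bound == state)
          return;            /* early-out: unsafe if *bound points at freed memory */
       *bound = state;
       /* ... emit the state's packets ... */
    }

    static void delete_state(void **bound, void *state)
    {
       if (*bound == state)
          *bound = NULL;     /* a recycled address can no longer match the early-out */
       free(state);
    }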
@@ -3012,7 +3482,8 @@ static bool si_update_gs_ring_buffers(struct si_context *sctx) pipe_aligned_buffer_create(sctx->b.screen, SI_RESOURCE_FLAG_UNMAPPABLE, PIPE_USAGE_DEFAULT, - esgs_ring_size, alignment); + esgs_ring_size, + sctx->screen->info.pte_fragment_size); if (!sctx->esgs_ring) return false; } @@ -3023,7 +3494,8 @@ static bool si_update_gs_ring_buffers(struct si_context *sctx) pipe_aligned_buffer_create(sctx->b.screen, SI_RESOURCE_FLAG_UNMAPPABLE, PIPE_USAGE_DEFAULT, - gsvs_ring_size, alignment); + gsvs_ring_size, + sctx->screen->info.pte_fragment_size); if (!sctx->gsvs_ring) return false; } @@ -3086,18 +3558,18 @@ static bool si_update_gs_ring_buffers(struct si_context *sctx) static void si_shader_lock(struct si_shader *shader) { - mtx_lock(&shader->selector->mutex); + simple_mtx_lock(&shader->selector->mutex); if (shader->previous_stage_sel) { assert(shader->previous_stage_sel != shader->selector); - mtx_lock(&shader->previous_stage_sel->mutex); + simple_mtx_lock(&shader->previous_stage_sel->mutex); } } static void si_shader_unlock(struct si_shader *shader) { if (shader->previous_stage_sel) - mtx_unlock(&shader->previous_stage_sel->mutex); - mtx_unlock(&shader->selector->mutex); + simple_mtx_unlock(&shader->previous_stage_sel->mutex); + simple_mtx_unlock(&shader->selector->mutex); } /** @@ -3109,7 +3581,6 @@ static int si_update_scratch_buffer(struct si_context *sctx, struct si_shader *shader) { uint64_t scratch_va = sctx->scratch_buffer->gpu_address; - int r; if (!shader) return 0; @@ -3134,16 +3605,10 @@ static int si_update_scratch_buffer(struct si_context *sctx, assert(sctx->scratch_buffer); - if (shader->previous_stage) - si_shader_apply_scratch_relocs(shader->previous_stage, scratch_va); - - si_shader_apply_scratch_relocs(shader, scratch_va); - /* Replace the shader bo with a new bo that has the relocs applied. */ - r = si_shader_binary_upload(sctx->screen, shader); - if (r) { + if (!si_shader_binary_upload(sctx->screen, shader, scratch_va)) { si_shader_unlock(shader); - return r; + return -1; } /* Update the shader state to use the new shader bo. */ @@ -3155,11 +3620,6 @@ static int si_update_scratch_buffer(struct si_context *sctx, return 1; } -static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx) -{ - return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0; -} - static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader) { return shader ? 
shader->config.scratch_bytes_per_wave : 0; @@ -3174,23 +3634,6 @@ static struct si_shader *si_get_tcs_current(struct si_context *sctx) sctx->fixed_func_tcs_shader.current; } -static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx) -{ - unsigned bytes = 0; - - bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current)); - bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current)); - bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current)); - bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current)); - - if (sctx->tes_shader.cso) { - struct si_shader *tcs = si_get_tcs_current(sctx); - - bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(tcs)); - } - return bytes; -} - static bool si_update_scratch_relocs(struct si_context *sctx) { struct si_shader *tcs = si_get_tcs_current(sctx); @@ -3224,10 +3667,12 @@ static bool si_update_scratch_relocs(struct si_context *sctx) if (r < 0) return false; if (r == 1) { - if (sctx->tes_shader.current) + if (sctx->vs_shader.current->key.as_ls) si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4); - else if (sctx->gs_shader.current) + else if (sctx->vs_shader.current->key.as_es) si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4); + else if (sctx->vs_shader.current->key.as_ngg) + si_pm4_bind_state(sctx, gs, sctx->vs_shader.current->pm4); else si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4); } @@ -3237,8 +3682,10 @@ static bool si_update_scratch_relocs(struct si_context *sctx) if (r < 0) return false; if (r == 1) { - if (sctx->gs_shader.current) + if (sctx->tes_shader.current->key.as_es) si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4); + else if (sctx->tes_shader.current->key.as_ngg) + si_pm4_bind_state(sctx, gs, sctx->tes_shader.current->pm4); else si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4); } @@ -3248,24 +3695,49 @@ static bool si_update_scratch_relocs(struct si_context *sctx) static bool si_update_spi_tmpring_size(struct si_context *sctx) { - unsigned current_scratch_buffer_size = - si_get_current_scratch_buffer_size(sctx); - unsigned scratch_bytes_per_wave = - si_get_max_scratch_bytes_per_wave(sctx); - unsigned scratch_needed_size = scratch_bytes_per_wave * - sctx->scratch_waves; + /* SPI_TMPRING_SIZE.WAVESIZE must be constant for each scratch buffer. + * There are 2 cases to handle: + * + * - If the current needed size is less than the maximum seen size, + * use the maximum seen size, so that WAVESIZE remains the same. + * + * - If the current needed size is greater than the maximum seen size, + * the scratch buffer is reallocated, so we can increase WAVESIZE. + * + * Shaders that set SCRATCH_EN=0 don't allocate scratch space. + * Otherwise, the number of waves that can use scratch is + * SPI_TMPRING_SIZE.WAVES. 
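After a scratch re-upload, each stage must be rebound to the hardware stage baked into the shader's key (as_ls/as_es/as_ngg), not to whatever the current pipeline bindings suggest; that is what the key checks above encode. The VS mapping written out as a helper, with hypothetical enum names:

    struct vs_key { unsigned as_ls : 1, as_es : 1, as_ngg : 1; };

    enum hw_stage { HW_LS, HW_ES, HW_GS, HW_VS };

    static enum hw_stage vs_hw_stage(struct vs_key k)
    {
       if (k.as_ls)  return HW_LS;   /* VS feeding the tessellation stages */
       if (k.as_es)  return HW_ES;   /* VS feeding a legacy GS */
       if (k.as_ngg) return HW_GS;   /* NGG runs the VS on the hw GS stage */
       return HW_VS;                 /* plain hardware VS */
    }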
+ */ + unsigned bytes = 0; + + bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current)); + bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current)); + bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current)); + + if (sctx->tes_shader.cso) { + bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current)); + bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(si_get_tcs_current(sctx))); + } + + sctx->max_seen_scratch_bytes_per_wave = + MAX2(sctx->max_seen_scratch_bytes_per_wave, bytes); + + unsigned scratch_needed_size = + sctx->max_seen_scratch_bytes_per_wave * sctx->scratch_waves; unsigned spi_tmpring_size; if (scratch_needed_size > 0) { - if (scratch_needed_size > current_scratch_buffer_size) { + if (!sctx->scratch_buffer || + scratch_needed_size > sctx->scratch_buffer->b.b.width0) { /* Create a bigger scratch buffer */ si_resource_reference(&sctx->scratch_buffer, NULL); sctx->scratch_buffer = si_aligned_buffer_create(&sctx->screen->b, - SI_RESOURCE_FLAG_UNMAPPABLE, - PIPE_USAGE_DEFAULT, - scratch_needed_size, 256); + SI_RESOURCE_FLAG_UNMAPPABLE, + PIPE_USAGE_DEFAULT, + scratch_needed_size, + sctx->screen->info.pte_fragment_size); if (!sctx->scratch_buffer) return false; @@ -3283,7 +3755,7 @@ static bool si_update_spi_tmpring_size(struct si_context *sctx) "scratch size should already be aligned correctly."); spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) | - S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10); + S_0286E8_WAVESIZE(sctx->max_seen_scratch_bytes_per_wave >> 10); if (spi_tmpring_size != sctx->spi_tmpring_size) { sctx->spi_tmpring_size = spi_tmpring_size; si_mark_atom_dirty(sctx, &sctx->atoms.s.scratch_state); @@ -3294,6 +3766,7 @@ static bool si_update_spi_tmpring_size(struct si_context *sctx) static void si_init_tess_factor_ring(struct si_context *sctx) { assert(!sctx->tess_rings); + assert(((sctx->screen->tess_factor_ring_size / 4) & C_030938_SIZE) == 0); /* The address must be aligned to 2^19, because the shader only * receives the high 13 bits. @@ -3321,7 +3794,10 @@ static void si_init_tess_factor_ring(struct si_context *sctx) S_030938_SIZE(sctx->screen->tess_factor_ring_size / 4)); si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE, factor_va >> 8); - if (sctx->chip_class >= GFX9) + if (sctx->chip_class >= GFX10) + si_pm4_set_reg(sctx->init_config, R_030984_VGT_TF_MEMORY_BASE_HI_UMD, + S_030984_BASE_HI(factor_va >> 40)); + else if (sctx->chip_class == GFX9) si_pm4_set_reg(sctx->init_config, R_030944_VGT_TF_MEMORY_BASE_HI, S_030944_BASE_HI(factor_va >> 40)); si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM, @@ -3343,39 +3819,57 @@ static void si_init_tess_factor_ring(struct si_context *sctx) si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL); } -static void si_update_vgt_shader_config(struct si_context *sctx) +static struct si_pm4_state *si_build_vgt_shader_config(struct si_screen *screen, + union si_vgt_stages_key key) { - /* Calculate the index of the config. 
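The high-water-mark logic above is easiest to see with numbers: with scratch_waves = 32 and a maximum of 8192 scratch bytes per wave seen so far, the buffer must hold 32 * 8192 bytes, and WAVESIZE is encoded in 1 KiB units as 8192 >> 10 = 8; keeping the per-wave maximum monotonic is what keeps WAVESIZE constant for a given buffer. A sketch of the packing, assuming the usual field layout of the S_0286E8_* macros (WAVES in the low bits, WAVESIZE from bit 12):

    #include <stdint.h>

    static uint32_t spi_tmpring_size(unsigned waves, unsigned max_bytes_per_wave)
    {
       return (waves & 0xfff) | ((max_bytes_per_wave >> 10) << 12);
    }

    /* spi_tmpring_size(32, 8192) == 32 | (8 << 12) */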
- * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */ - unsigned index = 2*!!sctx->tes_shader.cso + !!sctx->gs_shader.cso; - struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index]; + struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state); + uint32_t stages = 0; + + if (key.u.tess) { + stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) | + S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1); + + if (key.u.gs) + stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) | + S_028B54_GS_EN(1); + else if (key.u.ngg) + stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS); + else + stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS); + } else if (key.u.gs) { + stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) | + S_028B54_GS_EN(1); + } else if (key.u.ngg) { + stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL); + } - if (!*pm4) { - uint32_t stages = 0; + if (key.u.ngg) { + stages |= S_028B54_PRIMGEN_EN(1) | + S_028B54_NGG_WAVE_ID_EN(key.u.streamout) | + S_028B54_PRIMGEN_PASSTHRU_EN(key.u.ngg_passthrough); + } else if (key.u.gs) + stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER); - *pm4 = CALLOC_STRUCT(si_pm4_state); + if (screen->info.chip_class >= GFX9) + stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2); - if (sctx->tes_shader.cso) { - stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) | - S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1); + if (screen->info.chip_class >= GFX10 && screen->ge_wave_size == 32) { + stages |= S_028B54_HS_W32_EN(1) | + S_028B54_GS_W32_EN(key.u.ngg) | /* legacy GS only supports Wave64 */ + S_028B54_VS_W32_EN(1); + } - if (sctx->gs_shader.cso) - stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) | - S_028B54_GS_EN(1) | - S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER); - else - stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS); - } else if (sctx->gs_shader.cso) { - stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) | - S_028B54_GS_EN(1) | - S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER); - } + si_pm4_set_reg(pm4, R_028B54_VGT_SHADER_STAGES_EN, stages); + return pm4; +} - if (sctx->chip_class >= GFX9) - stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2); +static void si_update_vgt_shader_config(struct si_context *sctx, + union si_vgt_stages_key key) +{ + struct si_pm4_state **pm4 = &sctx->vgt_shader_config[key.index]; - si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages); - } + if (unlikely(!*pm4)) + *pm4 = si_build_vgt_shader_config(sctx->screen, key); si_pm4_bind_state(sctx, vgt_shader_config, *pm4); } @@ -3387,15 +3881,31 @@ bool si_update_shaders(struct si_context *sctx) struct si_shader *old_vs = si_get_vs_state(sctx); bool old_clip_disable = old_vs ? old_vs->key.opt.clip_disable : false; struct si_shader *old_ps = sctx->ps_shader.current; + union si_vgt_stages_key key; unsigned old_spi_shader_col_format = old_ps ? old_ps->key.part.ps.epilog.spi_shader_col_format : 0; int r; + if (!sctx->compiler.passes) + si_init_compiler(sctx->screen, &sctx->compiler); + compiler_state.compiler = &sctx->compiler; compiler_state.debug = sctx->debug; compiler_state.is_debug_context = sctx->is_debug; - /* Update stages before GS. */ + key.index = 0; + + if (sctx->tes_shader.cso) + key.u.tess = 1; + if (sctx->gs_shader.cso) + key.u.gs = 1; + + if (sctx->ngg) { + key.u.ngg = 1; + key.u.streamout = !!si_get_vs(sctx)->cso->so.num_outputs; + } + + /* Update TCS and TES. 
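Splitting the update into a pure builder plus a cached lookup means each distinct tess/gs/ngg/streamout combination has its register image built once and is rebound by key.index afterwards. The memoization shape, with an opaque hypothetical state type and an array sized generously for the key bits:

    struct pm4_state;   /* opaque */

    static struct pm4_state *configs[32];   /* one slot per packed key value */

    static struct pm4_state *get_vgt_config(unsigned key_index,
                                            struct pm4_state *(*build)(unsigned))
    {
       if (!configs[key_index])             /* built lazily, reused forever */
          configs[key_index] = build(key_index);
       return configs[key_index];
    }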
*/ if (sctx->tes_shader.cso) { if (!sctx->tess_rings) { si_init_tess_factor_ring(sctx); @@ -3403,17 +3913,8 @@ bool si_update_shaders(struct si_context *sctx) return false; } - /* VS as LS */ - if (sctx->chip_class <= GFX8) { - r = si_shader_select(ctx, &sctx->vs_shader, - &compiler_state); - if (r) - return false; - si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4); - } - if (sctx->tcs_shader.cso) { - r = si_shader_select(ctx, &sctx->tcs_shader, + r = si_shader_select(ctx, &sctx->tcs_shader, key, &compiler_state); if (r) return false; @@ -3427,69 +3928,82 @@ bool si_update_shaders(struct si_context *sctx) } r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader, - &compiler_state); + key, &compiler_state); if (r) return false; si_pm4_bind_state(sctx, hs, sctx->fixed_func_tcs_shader.current->pm4); } - if (sctx->gs_shader.cso) { - /* TES as ES */ - if (sctx->chip_class <= GFX8) { - r = si_shader_select(ctx, &sctx->tes_shader, - &compiler_state); - if (r) - return false; - si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4); - } - } else { - /* TES as VS */ - r = si_shader_select(ctx, &sctx->tes_shader, - &compiler_state); + if (!sctx->gs_shader.cso || sctx->chip_class <= GFX8) { + r = si_shader_select(ctx, &sctx->tes_shader, key, &compiler_state); if (r) return false; - si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4); - } - } else if (sctx->gs_shader.cso) { - if (sctx->chip_class <= GFX8) { - /* VS as ES */ - r = si_shader_select(ctx, &sctx->vs_shader, - &compiler_state); - if (r) - return false; - si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4); - si_pm4_bind_state(sctx, ls, NULL); - si_pm4_bind_state(sctx, hs, NULL); + if (sctx->gs_shader.cso) { + /* TES as ES */ + assert(sctx->chip_class <= GFX8); + si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4); + } else if (key.u.ngg) { + si_pm4_bind_state(sctx, gs, sctx->tes_shader.current->pm4); + } else { + si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4); + } } } else { - /* VS as VS */ - r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state); - if (r) - return false; - si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4); - si_pm4_bind_state(sctx, ls, NULL); + if (sctx->chip_class <= GFX8) + si_pm4_bind_state(sctx, ls, NULL); si_pm4_bind_state(sctx, hs, NULL); } /* Update GS. */ if (sctx->gs_shader.cso) { - r = si_shader_select(ctx, &sctx->gs_shader, &compiler_state); + r = si_shader_select(ctx, &sctx->gs_shader, key, &compiler_state); if (r) return false; si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4); - si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4); + if (!key.u.ngg) { + si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4); - if (!si_update_gs_ring_buffers(sctx)) - return false; + if (!si_update_gs_ring_buffers(sctx)) + return false; + } else { + si_pm4_bind_state(sctx, vs, NULL); + } } else { - si_pm4_bind_state(sctx, gs, NULL); - if (sctx->chip_class <= GFX8) - si_pm4_bind_state(sctx, es, NULL); + if (!key.u.ngg) { + si_pm4_bind_state(sctx, gs, NULL); + if (sctx->chip_class <= GFX8) + si_pm4_bind_state(sctx, es, NULL); + } + } + + /* Update VS. 
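The stages key steering all of this selection is a union, so the same bits act both as individual flags and as one comparable array index; the code zeroes key.index before setting any flag and adds ngg_passthrough only after variant selection, since passthrough depends on the compiled shader. The idiom, with the field names used in the hunk:

    union stages_key {
       struct {
          unsigned tess : 1;
          unsigned gs : 1;
          unsigned ngg : 1;
          unsigned streamout : 1;
          unsigned ngg_passthrough : 1;
       } u;
       unsigned index;   /* all flag bits viewed as one value */
    };

    /* Setting key.index = 0 first guarantees the unused bits never hold garbage. */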
*/ + if ((!key.u.tess && !key.u.gs) || sctx->chip_class <= GFX8) { + r = si_shader_select(ctx, &sctx->vs_shader, key, &compiler_state); + if (r) + return false; + + if (!key.u.tess && !key.u.gs) { + if (key.u.ngg) { + si_pm4_bind_state(sctx, gs, sctx->vs_shader.current->pm4); + si_pm4_bind_state(sctx, vs, NULL); + } else { + si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4); + } + } else if (sctx->tes_shader.cso) { + si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4); + } else { + assert(sctx->gs_shader.cso); + si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4); + } } - si_update_vgt_shader_config(sctx); + /* This must be done after the shader variant is selected. */ + if (sctx->ngg) + key.u.ngg_passthrough = gfx10_is_ngg_passthrough(si_get_vs(sctx)->current); + + si_update_vgt_shader_config(sctx, key); if (old_clip_disable != si_get_vs_state(sctx)->key.opt.clip_disable) si_mark_atom_dirty(sctx, &sctx->atoms.s.clip_regs); @@ -3497,7 +4011,7 @@ bool si_update_shaders(struct si_context *sctx) if (sctx->ps_shader.cso) { unsigned db_shader_control; - r = si_shader_select(ctx, &sctx->ps_shader, &compiler_state); + r = si_shader_select(ctx, &sctx->ps_shader, key, &compiler_state); if (r) return false; si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4); @@ -3506,7 +4020,9 @@ bool si_update_shaders(struct si_context *sctx) sctx->ps_shader.cso->db_shader_control | S_02880C_KILL_ENABLE(si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS); - if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) || + if (si_pm4_state_changed(sctx, ps) || + si_pm4_state_changed(sctx, vs) || + (key.u.ngg && si_pm4_state_changed(sctx, gs)) || sctx->sprite_coord_enable != rs->sprite_coord_enable || sctx->flatshade != rs->flatshade) { sctx->sprite_coord_enable = rs->sprite_coord_enable; @@ -3514,7 +4030,7 @@ bool si_update_shaders(struct si_context *sctx) si_mark_atom_dirty(sctx, &sctx->atoms.s.spi_map); } - if (sctx->screen->rbplus_allowed && + if (sctx->screen->info.rbplus_allowed && si_pm4_state_changed(sctx, ps) && (!old_ps || old_spi_shader_col_format !=