* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Christian König <christian.koenig@amd.com>
- * Marek Olšák <maraeo@gmail.com>
*/
#include "si_pipe.h"
#include "sid.h"
+#include "gfx9d.h"
#include "radeon/r600_cs.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"
#include "util/hash_table.h"
-#include "util/u_hash.h"
+#include "util/crc32.h"
+#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
+#include "util/disk_cache.h"
+#include "util/mesa-sha1.h"
+#include "ac_exp_param.h"
+
/* SHADER_CACHE */
/**
/* There is always a size of data followed by the data itself. */
unsigned relocs_size = shader->binary.reloc_count *
sizeof(shader->binary.relocs[0]);
- unsigned disasm_size = strlen(shader->binary.disasm_string) + 1;
+ unsigned disasm_size = shader->binary.disasm_string ?
+ strlen(shader->binary.disasm_string) + 1 : 0;
unsigned llvm_ir_size = shader->binary.llvm_ir_string ?
strlen(shader->binary.llvm_ir_string) + 1 : 0;
unsigned size =
*/
static bool si_shader_cache_insert_shader(struct si_screen *sscreen,
void *tgsi_binary,
- struct si_shader *shader)
+ struct si_shader *shader,
+ bool insert_into_disk_cache)
{
void *hw_binary;
struct hash_entry *entry;
+ uint8_t key[CACHE_KEY_SIZE];
entry = _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
if (entry)
return false;
}
+ if (sscreen->b.disk_shader_cache && insert_into_disk_cache) {
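+		/* Both tgsi_binary and hw_binary store their total size in the
+		 * first dword, so that value can be passed directly as the blob
+		 * size here and validated again on load. */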
+ disk_cache_compute_key(sscreen->b.disk_shader_cache, tgsi_binary,
+ *((uint32_t *)tgsi_binary), key);
+ disk_cache_put(sscreen->b.disk_shader_cache, key, hw_binary,
+ *((uint32_t *) hw_binary), NULL);
+ }
+
return true;
}
{
struct hash_entry *entry =
_mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
- if (!entry)
- return false;
+ if (!entry) {
+ if (sscreen->b.disk_shader_cache) {
+ unsigned char sha1[CACHE_KEY_SIZE];
+ size_t tg_size = *((uint32_t *) tgsi_binary);
+
+ disk_cache_compute_key(sscreen->b.disk_shader_cache,
+ tgsi_binary, tg_size, sha1);
+
+ size_t binary_size;
+ uint8_t *buffer =
+ disk_cache_get(sscreen->b.disk_shader_cache,
+ sha1, &binary_size);
+ if (!buffer)
+ return false;
- if (!si_load_shader_binary(shader, entry->data))
- return false;
+ if (binary_size < sizeof(uint32_t) ||
+ *((uint32_t*)buffer) != binary_size) {
+				/* Something has gone wrong; discard the item
+				 * from the cache and rebuild/link from
+				 * source.
+ */
+ assert(!"Invalid radeonsi shader disk cache "
+ "item!");
+
+ disk_cache_remove(sscreen->b.disk_shader_cache,
+ sha1);
+ free(buffer);
+ return false;
+ }
+
+ if (!si_load_shader_binary(shader, buffer)) {
+ free(buffer);
+ return false;
+ }
+ free(buffer);
+
+ if (!si_shader_cache_insert_shader(sscreen, tgsi_binary,
+ shader, false))
+ FREE(tgsi_binary);
+ } else {
+ return false;
+ }
+ } else {
+ if (si_load_shader_binary(shader, entry->data))
+ FREE(tgsi_binary);
+ else
+ return false;
+ }
p_atomic_inc(&sscreen->b.num_shader_cache_hits);
return true;
}
bool si_init_shader_cache(struct si_screen *sscreen)
{
- pipe_mutex_init(sscreen->shader_cache_mutex);
+ (void) mtx_init(&sscreen->shader_cache_mutex, mtx_plain);
sscreen->shader_cache =
_mesa_hash_table_create(NULL,
si_shader_cache_key_hash,
si_shader_cache_key_equals);
+
return sscreen->shader_cache != NULL;
}
if (sscreen->shader_cache)
_mesa_hash_table_destroy(sscreen->shader_cache,
si_destroy_shader_cache_entry);
- pipe_mutex_destroy(sscreen->shader_cache_mutex);
+ mtx_destroy(&sscreen->shader_cache_mutex);
}
/* SHADER STATES */
static void si_set_tesseval_regs(struct si_screen *sscreen,
- struct si_shader *shader,
+ struct si_shader_selector *tes,
struct si_pm4_state *pm4)
{
- struct tgsi_shader_info *info = &shader->selector->info;
+ struct tgsi_shader_info *info = &tes->info;
unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
S_028B6C_DISTRIBUTION_MODE(distribution_mode));
}
+/* Polaris needs different VTX_REUSE_DEPTH settings depending on
+ * whether the "fractional odd" tessellation spacing is used.
+ *
+ * Possible VGT configurations and which state should set the register:
+ *
+ * Reg set in | VGT shader configuration | Value
+ * ------------------------------------------------------
+ * VS as VS | VS | 30
+ * VS as ES | ES -> GS -> VS | 30
+ * TES as VS | LS -> HS -> VS | 14 or 30
+ * TES as ES | LS -> HS -> ES -> GS -> VS | 14 or 30
+ *
+ * If "shader" is NULL, it's assumed it's not LS or GS copy shader.
+ */
+static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen,
+ struct si_shader_selector *sel,
+ struct si_shader *shader,
+ struct si_pm4_state *pm4)
+{
+ unsigned type = sel->type;
+
+ if (sscreen->b.family < CHIP_POLARIS10)
+ return;
+
+ /* VS as VS, or VS as ES: */
+ if ((type == PIPE_SHADER_VERTEX &&
+ (!shader ||
+ (!shader->key.as_ls && !shader->is_gs_copy_shader))) ||
+ /* TES as VS, or TES as ES: */
+ type == PIPE_SHADER_TESS_EVAL) {
+ unsigned vtx_reuse_depth = 30;
+
+ if (type == PIPE_SHADER_TESS_EVAL &&
+ sel->info.properties[TGSI_PROPERTY_TES_SPACING] ==
+ PIPE_TESS_SPACING_FRACTIONAL_ODD)
+ vtx_reuse_depth = 14;
+
+ si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+ vtx_reuse_depth);
+ }
+}
+
static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader)
{
if (shader->pm4)
return shader->pm4;
}
-static void si_shader_ls(struct si_shader *shader)
+static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
{
struct si_pm4_state *pm4;
unsigned vgpr_comp_cnt;
uint64_t va;
+ assert(sscreen->b.chip_class <= VI);
+
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
return;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
/* We need at least 2 components for LS.
- * VGPR0-3: (VertexID, RelAutoindex, ???, InstanceID). */
- vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : 1;
+ * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
+ * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
+ */
+ vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;
si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);
S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
S_00B528_DX10_CLAMP(1) |
S_00B528_FLOAT_MODE(shader->config.float_mode);
- shader->config.rsrc2 = S_00B52C_USER_SGPR(SI_LS_NUM_USER_SGPR) |
+ shader->config.rsrc2 = S_00B52C_USER_SGPR(SI_VS_NUM_USER_SGPR) |
S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
}
-static void si_shader_hs(struct si_shader *shader)
+static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
{
struct si_pm4_state *pm4;
uint64_t va;
+ unsigned ls_vgpr_comp_cnt = 0;
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
- si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
- si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
+ if (sscreen->b.chip_class >= GFX9) {
+ si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
+ si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, va >> 40);
+
+ /* We need at least 2 components for LS.
+ * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
+	 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
+ */
+ ls_vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;
+
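+		/* GFX9 user SGPR counts can exceed the 5-bit USER_SGPR field,
+		 * so bit 5 of the count goes into USER_SGPR_MSB. */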
+ shader->config.rsrc2 =
+ S_00B42C_USER_SGPR(GFX9_TCS_NUM_USER_SGPR) |
+ S_00B42C_USER_SGPR_MSB(GFX9_TCS_NUM_USER_SGPR >> 5) |
+ S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
+ } else {
+ si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
+ si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
+
+ shader->config.rsrc2 =
+ S_00B42C_USER_SGPR(GFX6_TCS_NUM_USER_SGPR) |
+ S_00B42C_OC_LDS_EN(1) |
+ S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
+ }
+
si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) |
S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) |
S_00B428_DX10_CLAMP(1) |
- S_00B428_FLOAT_MODE(shader->config.float_mode));
- si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
- S_00B42C_USER_SGPR(SI_TCS_NUM_USER_SGPR) |
- S_00B42C_OC_LDS_EN(1) |
- S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+ S_00B428_FLOAT_MODE(shader->config.float_mode) |
+ S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt));
+
+ if (sscreen->b.chip_class <= VI) {
+ si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
+ shader->config.rsrc2);
+ }
}
static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
uint64_t va;
unsigned oc_lds_en;
+ assert(sscreen->b.chip_class <= VI);
+
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
return;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
if (shader->selector->type == PIPE_SHADER_VERTEX) {
- vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : 0;
- num_user_sgprs = SI_ES_NUM_USER_SGPR;
+ /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
+ vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
+ num_user_sgprs = SI_VS_NUM_USER_SGPR;
} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
- vgpr_comp_cnt = 3; /* all components are needed for TES */
+ vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
num_user_sgprs = SI_TES_NUM_USER_SGPR;
} else
unreachable("invalid shader selector type");
S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
- si_set_tesseval_regs(sscreen, shader, pm4);
+ si_set_tesseval_regs(sscreen, shader->selector, pm4);
+
+ polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
/**
* Calculate the appropriate setting of VGT_GS_MODE when \p shader is a
* geometry shader.
*/
-static uint32_t si_vgt_gs_mode(struct si_shader *shader)
+static uint32_t si_vgt_gs_mode(struct si_shader_selector *sel)
{
- unsigned gs_max_vert_out = shader->selector->gs_max_out_vertices;
+ enum chip_class chip_class = sel->screen->b.chip_class;
+ unsigned gs_max_vert_out = sel->gs_max_out_vertices;
unsigned cut_mode;
if (gs_max_vert_out <= 128) {
return S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
S_028A40_CUT_MODE(cut_mode)|
- S_028A40_ES_WRITE_OPTIMIZE(1) |
- S_028A40_GS_WRITE_OPTIMIZE(1);
+ S_028A40_ES_WRITE_OPTIMIZE(chip_class <= VI) |
+ S_028A40_GS_WRITE_OPTIMIZE(1) |
+ S_028A40_ONCHIP(chip_class >= GFX9 ? 1 : 0);
+}
+
+struct gfx9_gs_info {
+ unsigned es_verts_per_subgroup;
+ unsigned gs_prims_per_subgroup;
+ unsigned gs_inst_prims_in_subgroup;
+ unsigned max_prims_per_subgroup;
+ unsigned lds_size;
+};
+
+static void gfx9_get_gs_info(struct si_shader_selector *es,
+ struct si_shader_selector *gs,
+ struct gfx9_gs_info *out)
+{
+ unsigned gs_num_invocations = MAX2(gs->gs_num_invocations, 1);
+ unsigned input_prim = gs->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
+ bool uses_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
+ input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
+
+ /* All these are in dwords: */
+ /* We can't allow using the whole LDS, because GS waves compete with
+ * other shader stages for LDS space. */
+ const unsigned max_lds_size = 8 * 1024;
+ const unsigned esgs_itemsize = es->esgs_itemsize / 4;
+ unsigned esgs_lds_size;
+
+ /* All these are per subgroup: */
+ const unsigned max_out_prims = 32 * 1024;
+ const unsigned max_es_verts = 255;
+ const unsigned ideal_gs_prims = 64;
+ unsigned max_gs_prims, gs_prims;
+ unsigned min_es_verts, es_verts, worst_case_es_verts;
+
+ assert(gs_num_invocations <= 32); /* GL maximum */
+
+ if (uses_adjacency || gs_num_invocations > 1)
+ max_gs_prims = 127 / gs_num_invocations;
+ else
+ max_gs_prims = 255;
+
+ /* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
+ * Make sure we don't go over the maximum value.
+ */
+ if (gs->gs_max_out_vertices > 0) {
+ max_gs_prims = MIN2(max_gs_prims,
+ max_out_prims /
+ (gs->gs_max_out_vertices * gs_num_invocations));
+ }
+ assert(max_gs_prims > 0);
+
+ /* If the primitive has adjacency, halve the number of vertices
+ * that will be reused in multiple primitives.
+ */
+ min_es_verts = gs->gs_input_verts_per_prim / (uses_adjacency ? 2 : 1);
+
+ gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
+ worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);
+
+ /* Compute ESGS LDS size based on the worst case number of ES vertices
+ * needed to create the target number of GS prims per subgroup.
+ */
+ esgs_lds_size = esgs_itemsize * worst_case_es_verts;
+
+ /* If total LDS usage is too big, refactor partitions based on ratio
+ * of ESGS item sizes.
+ */
+ if (esgs_lds_size > max_lds_size) {
+ /* Our target GS Prims Per Subgroup was too large. Calculate
+ * the maximum number of GS Prims Per Subgroup that will fit
+ * into LDS, capped by the maximum that the hardware can support.
+ */
+ gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
+ max_gs_prims);
+ assert(gs_prims > 0);
+ worst_case_es_verts = MIN2(min_es_verts * gs_prims,
+ max_es_verts);
+
+ esgs_lds_size = esgs_itemsize * worst_case_es_verts;
+ assert(esgs_lds_size <= max_lds_size);
+ }
+
+ /* Now calculate remaining ESGS information. */
+ if (esgs_lds_size)
+ es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
+ else
+ es_verts = max_es_verts;
+
+ /* Vertices for adjacency primitives are not always reused, so restore
+ * it for ES_VERTS_PER_SUBGRP.
+ */
+ min_es_verts = gs->gs_input_verts_per_prim;
+
+ /* For normal primitives, the VGT only checks if they are past the ES
+ * verts per subgroup after allocating a full GS primitive and if they
+ * are, kick off a new subgroup. But if those additional ES verts are
+ * unique (i.e. not reused), we need to make sure there is enough LDS
+ * space to account for those ES verts beyond ES_VERTS_PER_SUBGRP.
+ */
+ es_verts -= min_es_verts - 1;
+
+ out->es_verts_per_subgroup = es_verts;
+ out->gs_prims_per_subgroup = gs_prims;
+ out->gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
+ out->max_prims_per_subgroup = out->gs_inst_prims_in_subgroup *
+ gs->gs_max_out_vertices;
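+	/* The LDS_SIZE field of SPI_SHADER_PGM_RSRC2_GS is in units of
+	 * 128 dwords, hence the alignment and division here. */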
+ out->lds_size = align(esgs_lds_size, 128) / 128;
+
+ assert(out->max_prims_per_subgroup <= max_out_prims);
}
-static void si_shader_gs(struct si_shader *shader)
+static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
{
- unsigned gs_vert_itemsize = shader->selector->gsvs_vertex_size;
- unsigned gsvs_itemsize = shader->selector->max_gsvs_emit_size >> 2;
- unsigned gs_num_invocations = shader->selector->gs_num_invocations;
+ struct si_shader_selector *sel = shader->selector;
+ const ubyte *num_components = sel->info.num_stream_output_components;
+ unsigned gs_num_invocations = sel->gs_num_invocations;
struct si_pm4_state *pm4;
uint64_t va;
- unsigned max_stream = shader->selector->max_gs_stream;
-
- /* The GSVS_RING_ITEMSIZE register takes 15 bits */
- assert(gsvs_itemsize < (1 << 15));
+ unsigned max_stream = sel->max_gs_stream;
+ unsigned offset;
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
return;
- si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(shader));
-
- si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, gsvs_itemsize);
- si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, gsvs_itemsize * ((max_stream >= 2) ? 2 : 1));
- si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, gsvs_itemsize * ((max_stream >= 3) ? 3 : 1));
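+	/* The GSVS ring stores all vertices of stream N before those of
+	 * stream N+1, so each ring offset is the running sum of the
+	 * per-stream vertex sizes (in dwords). */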
+ offset = num_components[0] * sel->gs_max_out_vertices;
+ si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, offset);
+ if (max_stream >= 1)
+ offset += num_components[1] * sel->gs_max_out_vertices;
+ si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, offset);
+ if (max_stream >= 2)
+ offset += num_components[2] * sel->gs_max_out_vertices;
+ si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, offset);
+ if (max_stream >= 3)
+ offset += num_components[3] * sel->gs_max_out_vertices;
+ si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);
- si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, gsvs_itemsize * (max_stream + 1));
+ /* The GSVS_RING_ITEMSIZE register takes 15 bits */
+ assert(offset < (1 << 15));
- si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, shader->selector->gs_max_out_vertices);
+ si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, sel->gs_max_out_vertices);
- si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, gs_vert_itemsize >> 2);
- si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? gs_vert_itemsize >> 2 : 0);
- si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? gs_vert_itemsize >> 2 : 0);
- si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? gs_vert_itemsize >> 2 : 0);
+ si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, num_components[0]);
+ si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? num_components[1] : 0);
+ si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? num_components[2] : 0);
+ si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? num_components[3] : 0);
si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
- si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
- si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);
- si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
- S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
- S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
- S_00B228_DX10_CLAMP(1) |
- S_00B228_FLOAT_MODE(shader->config.float_mode));
- si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
- S_00B22C_USER_SGPR(SI_GS_NUM_USER_SGPR) |
- S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+ if (sscreen->b.chip_class >= GFX9) {
+ unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
+ unsigned es_type = shader->key.part.gs.es->type;
+ unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
+ struct gfx9_gs_info gs_info;
+
+ if (es_type == PIPE_SHADER_VERTEX)
+ /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
+ es_vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
+ else if (es_type == PIPE_SHADER_TESS_EVAL)
+ es_vgpr_comp_cnt = shader->key.part.gs.es->info.uses_primid ? 3 : 2;
+ else
+ unreachable("invalid shader selector type");
+
+ /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
+ * VGPR[0:4] are always loaded.
+ */
+ if (sel->info.uses_invocationid)
+ gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
+ else if (sel->info.uses_primid)
+ gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
+ else if (input_prim >= PIPE_PRIM_TRIANGLES)
+ gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
+ else
+ gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
+
+ gfx9_get_gs_info(shader->key.part.gs.es, sel, &gs_info);
+
+ si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
+ si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, va >> 40);
+
+ si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
+ S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
+ S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
+ S_00B228_DX10_CLAMP(1) |
+ S_00B228_FLOAT_MODE(shader->config.float_mode) |
+ S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
+ si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
+ S_00B22C_USER_SGPR(GFX9_GS_NUM_USER_SGPR) |
+ S_00B22C_USER_SGPR_MSB(GFX9_GS_NUM_USER_SGPR >> 5) |
+ S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
+ S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
+ S_00B22C_LDS_SIZE(gs_info.lds_size) |
+ S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+
+ si_pm4_set_reg(pm4, R_028A44_VGT_GS_ONCHIP_CNTL,
+ S_028A44_ES_VERTS_PER_SUBGRP(gs_info.es_verts_per_subgroup) |
+ S_028A44_GS_PRIMS_PER_SUBGRP(gs_info.gs_prims_per_subgroup) |
+ S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_info.gs_inst_prims_in_subgroup));
+ si_pm4_set_reg(pm4, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
+ S_028A94_MAX_PRIMS_PER_SUBGROUP(gs_info.max_prims_per_subgroup));
+ si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
+ shader->key.part.gs.es->esgs_itemsize / 4);
+
+ if (es_type == PIPE_SHADER_TESS_EVAL)
+ si_set_tesseval_regs(sscreen, shader->key.part.gs.es, pm4);
+
+ polaris_set_vgt_vertex_reuse(sscreen, shader->key.part.gs.es,
+ NULL, pm4);
+ } else {
+ si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
+ si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);
+
+ si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
+ S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
+ S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
+ S_00B228_DX10_CLAMP(1) |
+ S_00B228_FLOAT_MODE(shader->config.float_mode));
+ si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
+ S_00B22C_USER_SGPR(GFX6_GS_NUM_USER_SGPR) |
+ S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
+ }
}
/**
* is the copy shader.
*/
static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
- struct si_shader *gs)
+ struct si_shader_selector *gs)
{
+ const struct tgsi_shader_info *info = &shader->selector->info;
struct si_pm4_state *pm4;
unsigned num_user_sgprs;
unsigned nparams, vgpr_comp_cnt;
uint64_t va;
unsigned oc_lds_en;
unsigned window_space =
- shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
- bool enable_prim_id = si_vs_exports_prim_id(shader);
+ info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
+ bool enable_prim_id = shader->key.mono.u.vs_export_prim_id || info->uses_primid;
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
* not sent again.
*/
if (!gs) {
- si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
- S_028A40_MODE(enable_prim_id ? V_028A40_GS_SCENARIO_A : 0));
+ unsigned mode = V_028A40_GS_OFF;
+
+ /* PrimID needs GS scenario A. */
+ if (enable_prim_id)
+ mode = V_028A40_GS_SCENARIO_A;
+
+ si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, S_028A40_MODE(mode));
si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
} else {
si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(gs));
si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
}
+ if (sscreen->b.chip_class <= VI) {
+		/* Vertex reuse must be disabled if the shader writes oViewport. */
+ si_pm4_set_reg(pm4, R_028AB4_VGT_REUSE_OFF,
+ S_028AB4_REUSE_OFF(info->writes_viewport_index));
+ }
+
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
} else if (shader->selector->type == PIPE_SHADER_VERTEX) {
- vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : (enable_prim_id ? 2 : 0);
- num_user_sgprs = SI_VS_NUM_USER_SGPR;
+ /* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
+		 * If PrimID is disabled, InstanceID / StepRate1 is loaded instead.
+		 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
+ */
+ vgpr_comp_cnt = enable_prim_id ? 2 : (shader->info.uses_instanceid ? 1 : 0);
+
+ if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
+ num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
+ info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
+ } else {
+ num_user_sgprs = SI_VS_NUM_USER_SGPR;
+ }
} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
- vgpr_comp_cnt = 3; /* all components are needed for TES */
+ vgpr_comp_cnt = enable_prim_id ? 3 : 2;
num_user_sgprs = SI_TES_NUM_USER_SGPR;
} else
unreachable("invalid shader selector type");
S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
- si_set_tesseval_regs(sscreen, shader, pm4);
+ si_set_tesseval_regs(sscreen, shader->selector, pm4);
+
+ polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
static unsigned si_get_ps_num_interp(struct si_shader *ps)
unsigned num_colors = !!(info->colors_read & 0x0f) +
!!(info->colors_read & 0xf0);
unsigned num_interp = ps->selector->info.num_inputs +
- (ps->key.ps.prolog.color_two_side ? num_colors : 0);
+ (ps->key.part.ps.prolog.color_two_side ? num_colors : 0);
assert(num_interp <= 32);
return MIN2(num_interp, 32);
static unsigned si_get_spi_shader_col_format(struct si_shader *shader)
{
- unsigned value = shader->key.ps.epilog.spi_shader_col_format;
+ unsigned value = shader->key.part.ps.epilog.spi_shader_col_format;
unsigned i, num_targets = (util_last_bit(value) + 3) / 4;
/* If the i-th target format is set, all previous target formats must
G_0286CC_PERSP_PULL_MODEL_ENA(input_ena));
/* Validate interpolation optimization flags (read as implications). */
- assert(!shader->key.ps.prolog.bc_optimize_for_persp ||
+ assert(!shader->key.part.ps.prolog.bc_optimize_for_persp ||
(G_0286CC_PERSP_CENTER_ENA(input_ena) &&
G_0286CC_PERSP_CENTROID_ENA(input_ena)));
- assert(!shader->key.ps.prolog.bc_optimize_for_linear ||
+ assert(!shader->key.part.ps.prolog.bc_optimize_for_linear ||
(G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
- assert(!shader->key.ps.prolog.force_persp_center_interp ||
+ assert(!shader->key.part.ps.prolog.force_persp_center_interp ||
(!G_0286CC_PERSP_SAMPLE_ENA(input_ena) &&
!G_0286CC_PERSP_CENTROID_ENA(input_ena)));
- assert(!shader->key.ps.prolog.force_linear_center_interp ||
+ assert(!shader->key.part.ps.prolog.force_linear_center_interp ||
(!G_0286CC_LINEAR_SAMPLE_ENA(input_ena) &&
!G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
- assert(!shader->key.ps.prolog.force_persp_sample_interp ||
+ assert(!shader->key.part.ps.prolog.force_persp_sample_interp ||
(!G_0286CC_PERSP_CENTER_ENA(input_ena) &&
!G_0286CC_PERSP_CENTROID_ENA(input_ena)));
- assert(!shader->key.ps.prolog.force_linear_sample_interp ||
+ assert(!shader->key.part.ps.prolog.force_linear_sample_interp ||
(!G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
!G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
/* Validate cases when the optimizations are off (read as implications). */
- assert(shader->key.ps.prolog.bc_optimize_for_persp ||
+ assert(shader->key.part.ps.prolog.bc_optimize_for_persp ||
!G_0286CC_PERSP_CENTER_ENA(input_ena) ||
!G_0286CC_PERSP_CENTROID_ENA(input_ena));
- assert(shader->key.ps.prolog.bc_optimize_for_linear ||
+ assert(shader->key.part.ps.prolog.bc_optimize_for_linear ||
!G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
!G_0286CC_LINEAR_CENTROID_ENA(input_ena));
{
switch (shader->selector->type) {
case PIPE_SHADER_VERTEX:
- if (shader->key.vs.as_ls)
- si_shader_ls(shader);
- else if (shader->key.vs.as_es)
+ if (shader->key.as_ls)
+ si_shader_ls(sscreen, shader);
+ else if (shader->key.as_es)
si_shader_es(sscreen, shader);
else
si_shader_vs(sscreen, shader, NULL);
break;
case PIPE_SHADER_TESS_CTRL:
- si_shader_hs(shader);
+ si_shader_hs(sscreen, shader);
break;
case PIPE_SHADER_TESS_EVAL:
- if (shader->key.tes.as_es)
+ if (shader->key.as_es)
si_shader_es(sscreen, shader);
else
si_shader_vs(sscreen, shader, NULL);
break;
case PIPE_SHADER_GEOMETRY:
- si_shader_gs(shader);
- si_shader_vs(sscreen, shader->gs_copy_shader, shader);
+ si_shader_gs(sscreen, shader);
break;
case PIPE_SHADER_FRAGMENT:
si_shader_ps(shader);
return PIPE_FUNC_ALWAYS;
}
+static void si_shader_selector_key_vs(struct si_context *sctx,
+ struct si_shader_selector *vs,
+ struct si_shader_key *key,
+ struct si_vs_prolog_bits *prolog_key)
+{
+ if (!sctx->vertex_elements)
+ return;
+
+ prolog_key->instance_divisor_is_one =
+ sctx->vertex_elements->instance_divisor_is_one;
+ prolog_key->instance_divisor_is_fetched =
+ sctx->vertex_elements->instance_divisor_is_fetched;
+
+ /* Prefer a monolithic shader to allow scheduling divisions around
+ * VBO loads. */
+ if (prolog_key->instance_divisor_is_fetched)
+ key->opt.prefer_mono = 1;
+
+ unsigned count = MIN2(vs->info.num_inputs,
+ sctx->vertex_elements->count);
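+	/* fix_fetch stores one byte per vertex element; copy only the
+	 * entries the shader actually reads. */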
+ memcpy(key->mono.vs_fix_fetch, sctx->vertex_elements->fix_fetch, count);
+}
+
+static void si_shader_selector_key_hw_vs(struct si_context *sctx,
+ struct si_shader_selector *vs,
+ struct si_shader_key *key)
+{
+ struct si_shader_selector *ps = sctx->ps_shader.cso;
+
+ key->opt.clip_disable =
+ sctx->queued.named.rasterizer->clip_plane_enable == 0 &&
+ (vs->info.clipdist_writemask ||
+ vs->info.writes_clipvertex) &&
+ !vs->info.culldist_writemask;
+
+ /* Find out if PS is disabled. */
+ bool ps_disabled = true;
+ if (ps) {
+ bool ps_modifies_zs = ps->info.uses_kill ||
+ ps->info.writes_z ||
+ ps->info.writes_stencil ||
+ ps->info.writes_samplemask ||
+ si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS;
+
+ unsigned ps_colormask = sctx->framebuffer.colorbuf_enabled_4bit &
+ sctx->queued.named.blend->cb_target_mask;
+ if (!ps->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
+ ps_colormask &= ps->colors_written_4bit;
+
+ ps_disabled = sctx->queued.named.rasterizer->rasterizer_discard ||
+ (!ps_colormask &&
+ !ps_modifies_zs &&
+ !ps->info.writes_memory);
+ }
+
+ /* Find out which VS outputs aren't used by the PS. */
+ uint64_t outputs_written = vs->outputs_written;
+ uint64_t inputs_read = 0;
+
+ /* ignore POSITION, PSIZE */
+	outputs_written &= ~((1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_POSITION, 0)) |
+	                     (1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_PSIZE, 0)));
+
+ if (!ps_disabled) {
+ inputs_read = ps->inputs_read;
+ }
+
+ uint64_t linked = outputs_written & inputs_read;
+
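+	/* Outputs that are written but never read by the PS can be
+	 * eliminated by the optimized variant. */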
+ key->opt.kill_outputs = ~linked & outputs_written;
+}
+
/* Compute the key for the hw shader variant */
static inline void si_shader_selector_key(struct pipe_context *ctx,
struct si_shader_selector *sel,
- union si_shader_key *key)
+ struct si_shader_key *key)
{
struct si_context *sctx = (struct si_context *)ctx;
- unsigned i;
memset(key, 0, sizeof(*key));
switch (sel->type) {
case PIPE_SHADER_VERTEX:
- if (sctx->vertex_elements) {
- unsigned count = MIN2(sel->info.num_inputs,
- sctx->vertex_elements->count);
- for (i = 0; i < count; ++i)
- key->vs.prolog.instance_divisors[i] =
- sctx->vertex_elements->elements[i].instance_divisor;
- }
+ si_shader_selector_key_vs(sctx, sel, key, &key->part.vs.prolog);
+
if (sctx->tes_shader.cso)
- key->vs.as_ls = 1;
+ key->as_ls = 1;
else if (sctx->gs_shader.cso)
- key->vs.as_es = 1;
+ key->as_es = 1;
+ else {
+ si_shader_selector_key_hw_vs(sctx, sel, key);
- if (!sctx->gs_shader.cso && sctx->ps_shader.cso &&
- sctx->ps_shader.cso->info.uses_primid)
- key->vs.epilog.export_prim_id = 1;
+ if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
+ key->mono.u.vs_export_prim_id = 1;
+ }
break;
case PIPE_SHADER_TESS_CTRL:
- key->tcs.epilog.prim_mode =
+ if (sctx->b.chip_class >= GFX9) {
+ si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
+ key, &key->part.tcs.ls_prolog);
+ key->part.tcs.ls = sctx->vs_shader.cso;
+
+ /* When the LS VGPR fix is needed, monolithic shaders
+ * can:
+ * - avoid initializing EXEC in both the LS prolog
+ * and the LS main part when !vs_needs_prolog
+ * - remove the fixup for unused input VGPRs
+ */
+ key->part.tcs.ls_prolog.ls_vgpr_fix = sctx->ls_vgpr_fix;
+
+ /* The LS output / HS input layout can be communicated
+ * directly instead of via user SGPRs for merged LS-HS.
+ * The LS VGPR fix prefers this too.
+ */
+ key->opt.prefer_mono = 1;
+ }
+
+ key->part.tcs.epilog.prim_mode =
sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
+ key->part.tcs.epilog.invoc0_tess_factors_are_def =
+ sel->tcs_info.tessfactors_are_def_in_all_invocs;
+ key->part.tcs.epilog.tes_reads_tess_factors =
+ sctx->tes_shader.cso->info.reads_tess_factors;
if (sel == sctx->fixed_func_tcs_shader.cso)
- key->tcs.epilog.inputs_to_copy = sctx->vs_shader.cso->outputs_written;
+ key->mono.u.ff_tcs_inputs_to_copy = sctx->vs_shader.cso->outputs_written;
break;
case PIPE_SHADER_TESS_EVAL:
if (sctx->gs_shader.cso)
- key->tes.as_es = 1;
- else if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
- key->tes.epilog.export_prim_id = 1;
+ key->as_es = 1;
+ else {
+ si_shader_selector_key_hw_vs(sctx, sel, key);
+
+ if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
+ key->mono.u.vs_export_prim_id = 1;
+ }
break;
case PIPE_SHADER_GEOMETRY:
+ if (sctx->b.chip_class >= GFX9) {
+ if (sctx->tes_shader.cso) {
+ key->part.gs.es = sctx->tes_shader.cso;
+ } else {
+ si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
+ key, &key->part.gs.vs_prolog);
+ key->part.gs.es = sctx->vs_shader.cso;
+ }
+
+ /* Merged ES-GS can have unbalanced wave usage.
+ *
+ * ES threads are per-vertex, while GS threads are
+ * per-primitive. So without any amplification, there
+ * are fewer GS threads than ES threads, which can result
+ * in empty (no-op) GS waves. With too much amplification,
+ * there are more GS threads than ES threads, which
+ * can result in empty (no-op) ES waves.
+ *
+ * Non-monolithic shaders are implemented by setting EXEC
+ * at the beginning of shader parts, and don't jump to
+ * the end if EXEC is 0.
+ *
+ * Monolithic shaders use conditional blocks, so they can
+ * jump and skip empty waves of ES or GS. So set this to
+ * always use optimized variants, which are monolithic.
+ */
+ key->opt.prefer_mono = 1;
+ }
+ key->part.gs.prolog.tri_strip_adj_fix = sctx->gs_tri_strip_adj_fix;
break;
case PIPE_SHADER_FRAGMENT: {
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
sel->info.colors_written == 0x1)
- key->ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
+ key->part.ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;
if (blend) {
/* Select the shader color format based on whether
* blending or alpha are needed.
*/
- key->ps.epilog.spi_shader_col_format =
+ key->part.ps.epilog.spi_shader_col_format =
(blend->blend_enable_4bit & blend->need_src_alpha_4bit &
sctx->framebuffer.spi_shader_col_format_blend_alpha) |
(blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
sctx->framebuffer.spi_shader_col_format_alpha) |
(~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
sctx->framebuffer.spi_shader_col_format);
+ key->part.ps.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit;
/* The output for dual source blending should have
* the same format as the first output.
*/
if (blend->dual_src_blend)
- key->ps.epilog.spi_shader_col_format |=
- (key->ps.epilog.spi_shader_col_format & 0xf) << 4;
+ key->part.ps.epilog.spi_shader_col_format |=
+ (key->part.ps.epilog.spi_shader_col_format & 0xf) << 4;
} else
- key->ps.epilog.spi_shader_col_format = sctx->framebuffer.spi_shader_col_format;
+ key->part.ps.epilog.spi_shader_col_format = sctx->framebuffer.spi_shader_col_format;
/* If alpha-to-coverage is enabled, we have to export alpha
* even if there is no color buffer.
*/
- if (!(key->ps.epilog.spi_shader_col_format & 0xf) &&
+ if (!(key->part.ps.epilog.spi_shader_col_format & 0xf) &&
blend && blend->alpha_to_coverage)
- key->ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;
+ key->part.ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;
/* On SI and CIK except Hawaii, the CB doesn't clamp outputs
* to the range supported by the type if a channel has less
* than 16 bits and the export format is 16_ABGR.
*/
- if (sctx->b.chip_class <= CIK && sctx->b.family != CHIP_HAWAII)
- key->ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
+ if (sctx->b.chip_class <= CIK && sctx->b.family != CHIP_HAWAII) {
+ key->part.ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
+ key->part.ps.epilog.color_is_int10 = sctx->framebuffer.color_is_int10;
+ }
/* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
- if (!key->ps.epilog.last_cbuf) {
- key->ps.epilog.spi_shader_col_format &= sel->colors_written_4bit;
- key->ps.epilog.color_is_int8 &= sel->info.colors_written;
+ if (!key->part.ps.epilog.last_cbuf) {
+ key->part.ps.epilog.spi_shader_col_format &= sel->colors_written_4bit;
+ key->part.ps.epilog.color_is_int8 &= sel->info.colors_written;
+ key->part.ps.epilog.color_is_int10 &= sel->info.colors_written;
}
if (rs) {
sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;
- key->ps.prolog.color_two_side = rs->two_side && sel->info.colors_read;
- key->ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read;
+ key->part.ps.prolog.color_two_side = rs->two_side && sel->info.colors_read;
+ key->part.ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read;
if (sctx->queued.named.blend) {
- key->ps.epilog.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
+ key->part.ps.epilog.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
rs->multisample_enable;
}
- key->ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
- key->ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
+ key->part.ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
+ key->part.ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
(is_line && rs->line_smooth)) &&
sctx->framebuffer.nr_samples <= 1;
- key->ps.epilog.clamp_color = rs->clamp_fragment_color;
+ key->part.ps.epilog.clamp_color = rs->clamp_fragment_color;
+
+ if (sctx->ps_iter_samples > 1 &&
+ sel->info.reads_samplemask) {
+ key->part.ps.prolog.samplemask_log_ps_iter =
+ util_logbase2(util_next_power_of_two(sctx->ps_iter_samples));
+ }
if (rs->force_persample_interp &&
rs->multisample_enable &&
sctx->framebuffer.nr_samples > 1 &&
sctx->ps_iter_samples > 1) {
- key->ps.prolog.force_persp_sample_interp =
+ key->part.ps.prolog.force_persp_sample_interp =
sel->info.uses_persp_center ||
sel->info.uses_persp_centroid;
- key->ps.prolog.force_linear_sample_interp =
+ key->part.ps.prolog.force_linear_sample_interp =
sel->info.uses_linear_center ||
sel->info.uses_linear_centroid;
} else if (rs->multisample_enable &&
sctx->framebuffer.nr_samples > 1) {
- key->ps.prolog.bc_optimize_for_persp =
+ key->part.ps.prolog.bc_optimize_for_persp =
sel->info.uses_persp_center &&
sel->info.uses_persp_centroid;
- key->ps.prolog.bc_optimize_for_linear =
+ key->part.ps.prolog.bc_optimize_for_linear =
sel->info.uses_linear_center &&
sel->info.uses_linear_centroid;
} else {
/* Make sure SPI doesn't compute more than 1 pair
* of (i,j), which is the optimization here. */
- key->ps.prolog.force_persp_center_interp =
+ key->part.ps.prolog.force_persp_center_interp =
sel->info.uses_persp_center +
sel->info.uses_persp_centroid +
sel->info.uses_persp_sample > 1;
- key->ps.prolog.force_linear_center_interp =
+ key->part.ps.prolog.force_linear_center_interp =
sel->info.uses_linear_center +
sel->info.uses_linear_centroid +
sel->info.uses_linear_sample > 1;
+
+ if (sel->info.opcode_count[TGSI_OPCODE_INTERP_SAMPLE])
+ key->mono.u.ps.interpolate_at_sample_force_center = 1;
}
}
- key->ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
+ key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
break;
}
default:
assert(0);
}
+
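+	/* With the NO_OPT_VARIANT debug flag, clear all optimization bits
+	 * so that only unoptimized shader variants are used. */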
+ if (unlikely(sctx->screen->b.debug_flags & DBG(NO_OPT_VARIANT)))
+ memset(&key->opt, 0, sizeof(key->opt));
+}
+
+static void si_build_shader_variant(struct si_shader *shader,
+ int thread_index,
+ bool low_priority)
+{
+ struct si_shader_selector *sel = shader->selector;
+ struct si_screen *sscreen = sel->screen;
+ LLVMTargetMachineRef tm;
+ struct pipe_debug_callback *debug = &shader->compiler_ctx_state.debug;
+ int r;
+
+ if (thread_index >= 0) {
+ if (low_priority) {
+ assert(thread_index < ARRAY_SIZE(sscreen->tm_low_priority));
+ tm = sscreen->tm_low_priority[thread_index];
+ } else {
+ assert(thread_index < ARRAY_SIZE(sscreen->tm));
+ tm = sscreen->tm[thread_index];
+ }
+ if (!debug->async)
+ debug = NULL;
+ } else {
+ assert(!low_priority);
+ tm = shader->compiler_ctx_state.tm;
+ }
+
+ r = si_shader_create(sscreen, tm, shader, debug);
+ if (unlikely(r)) {
+ R600_ERR("Failed to build shader variant (type=%u) %d\n",
+ sel->type, r);
+ shader->compilation_failed = true;
+ return;
+ }
+
+ if (shader->compiler_ctx_state.is_debug_context) {
+ FILE *f = open_memstream(&shader->shader_log,
+ &shader->shader_log_size);
+ if (f) {
+ si_shader_dump(sscreen, shader, NULL, sel->type, f, false);
+ fclose(f);
+ }
+ }
+
+ si_shader_init_pm4_state(sscreen, shader);
+}
+
+static void si_build_shader_variant_low_priority(void *job, int thread_index)
+{
+ struct si_shader *shader = (struct si_shader *)job;
+
+ assert(thread_index >= 0);
+
+ si_build_shader_variant(shader, thread_index, true);
+}
+
+static const struct si_shader_key zeroed;
+
+static bool si_check_missing_main_part(struct si_screen *sscreen,
+ struct si_shader_selector *sel,
+ struct si_compiler_ctx_state *compiler_state,
+ struct si_shader_key *key)
+{
+ struct si_shader **mainp = si_get_main_shader_part(sel, key);
+
+ if (!*mainp) {
+ struct si_shader *main_part = CALLOC_STRUCT(si_shader);
+
+ if (!main_part)
+ return false;
+
+ /* We can leave the fence as permanently signaled because the
+ * main part becomes visible globally only after it has been
+ * compiled. */
+ util_queue_fence_init(&main_part->ready);
+
+ main_part->selector = sel;
+ main_part->key.as_es = key->as_es;
+ main_part->key.as_ls = key->as_ls;
+
+ if (si_compile_tgsi_shader(sscreen, compiler_state->tm,
+ main_part, false,
+ &compiler_state->debug) != 0) {
+ FREE(main_part);
+ return false;
+ }
+ *mainp = main_part;
+ }
+ return true;
}
/* Select the hw shader variant depending on the current state. */
static int si_shader_select_with_key(struct si_screen *sscreen,
struct si_shader_ctx_state *state,
- union si_shader_key *key,
- LLVMTargetMachineRef tm,
- struct pipe_debug_callback *debug,
- bool wait,
- bool is_debug_context)
+ struct si_compiler_ctx_state *compiler_state,
+ struct si_shader_key *key,
+ int thread_index)
{
struct si_shader_selector *sel = state->cso;
+ struct si_shader_selector *previous_stage_sel = NULL;
struct si_shader *current = state->current;
struct si_shader *iter, *shader = NULL;
- int r;
+again:
/* Check if we don't need to change anything.
* This path is also used for most shaders that don't need multiple
* variants, it will cost just a computation of the key and this
* test. */
- if (likely(current && memcmp(¤t->key, key, sizeof(*key)) == 0))
- return 0;
+ if (likely(current &&
+ memcmp(¤t->key, key, sizeof(*key)) == 0)) {
+ if (unlikely(!util_queue_fence_is_signalled(¤t->ready))) {
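+			/* If an optimized variant hasn't finished compiling,
+			 * fall back to the unoptimized variant by clearing the
+			 * optimization bits instead of stalling on the fence. */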
+ if (current->is_optimized) {
+ memset(&key->opt, 0, sizeof(key->opt));
+ goto current_not_ready;
+ }
+
+ util_queue_fence_wait(¤t->ready);
+ }
+
+ return current->compilation_failed ? -1 : 0;
+ }
+current_not_ready:
/* This must be done before the mutex is locked, because async GS
* compilation calls this function too, and therefore must enter
* the mutex first.
+ *
+ * Only wait if we are in a draw call. Don't wait if we are
+ * in a compiler thread.
*/
- if (wait)
- util_queue_job_wait(&sel->ready);
+ if (thread_index < 0)
+ util_queue_fence_wait(&sel->ready);
- pipe_mutex_lock(sel->mutex);
+ mtx_lock(&sel->mutex);
/* Find the shader variant. */
for (iter = sel->first_variant; iter; iter = iter->next_variant) {
/* Don't check the "current" shader. We checked it above. */
if (current != iter &&
memcmp(&iter->key, key, sizeof(*key)) == 0) {
+ mtx_unlock(&sel->mutex);
+
+ if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
+ /* If it's an optimized shader and its compilation has
+ * been started but isn't done, use the unoptimized
+ * shader so as not to cause a stall due to compilation.
+ */
+ if (iter->is_optimized) {
+ memset(&key->opt, 0, sizeof(key->opt));
+ goto again;
+ }
+
+ util_queue_fence_wait(&iter->ready);
+ }
+
+ if (iter->compilation_failed) {
+ return -1; /* skip the draw call */
+ }
+
state->current = iter;
- pipe_mutex_unlock(sel->mutex);
return 0;
}
}
/* Build a new shader. */
shader = CALLOC_STRUCT(si_shader);
if (!shader) {
- pipe_mutex_unlock(sel->mutex);
+ mtx_unlock(&sel->mutex);
return -ENOMEM;
}
+
+ util_queue_fence_init(&shader->ready);
+
shader->selector = sel;
shader->key = *key;
+ shader->compiler_ctx_state = *compiler_state;
+
+ /* If this is a merged shader, get the first shader's selector. */
+ if (sscreen->b.chip_class >= GFX9) {
+ if (sel->type == PIPE_SHADER_TESS_CTRL)
+ previous_stage_sel = key->part.tcs.ls;
+ else if (sel->type == PIPE_SHADER_GEOMETRY)
+ previous_stage_sel = key->part.gs.es;
+
+ /* We need to wait for the previous shader. */
+ if (previous_stage_sel && thread_index < 0)
+ util_queue_fence_wait(&previous_stage_sel->ready);
+ }
- r = si_shader_create(sscreen, tm, shader, debug);
- if (unlikely(r)) {
- R600_ERR("Failed to build shader variant (type=%u) %d\n",
- sel->type, r);
- FREE(shader);
- pipe_mutex_unlock(sel->mutex);
- return r;
+ /* Compile the main shader part if it doesn't exist. This can happen
+ * if the initial guess was wrong. */
+ bool is_pure_monolithic =
+ sscreen->use_monolithic_shaders ||
+ memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;
+
+ if (!is_pure_monolithic) {
+ bool ok;
+
+ /* Make sure the main shader part is present. This is needed
+ * for shaders that can be compiled as VS, LS, or ES, and only
+ * one of them is compiled at creation.
+ *
+ * For merged shaders, check that the starting shader's main
+ * part is present.
+ */
+ if (previous_stage_sel) {
+ struct si_shader_key shader1_key = zeroed;
+
+ if (sel->type == PIPE_SHADER_TESS_CTRL)
+ shader1_key.as_ls = 1;
+ else if (sel->type == PIPE_SHADER_GEOMETRY)
+ shader1_key.as_es = 1;
+ else
+ assert(0);
+
+ mtx_lock(&previous_stage_sel->mutex);
+ ok = si_check_missing_main_part(sscreen,
+ previous_stage_sel,
+ compiler_state, &shader1_key);
+ mtx_unlock(&previous_stage_sel->mutex);
+ } else {
+ ok = si_check_missing_main_part(sscreen, sel,
+ compiler_state, key);
+ }
+ if (!ok) {
+ FREE(shader);
+ mtx_unlock(&sel->mutex);
+ return -ENOMEM; /* skip the draw call */
+ }
}
- if (is_debug_context) {
- FILE *f = open_memstream(&shader->shader_log,
- &shader->shader_log_size);
- if (f) {
- si_shader_dump(sscreen, shader, NULL, sel->type, f);
- fclose(f);
+ /* Keep the reference to the 1st shader of merged shaders, so that
+ * Gallium can't destroy it before we destroy the 2nd shader.
+ *
+ * Set sctx = NULL, because it's unused if we're not releasing
+ * the shader, and we don't have any sctx here.
+ */
+ si_shader_selector_reference(NULL, &shader->previous_stage_sel,
+ previous_stage_sel);
+
+ /* Monolithic-only shaders don't make a distinction between optimized
+ * and unoptimized. */
+ shader->is_monolithic =
+ is_pure_monolithic ||
+ memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
+
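+	/* An optimized variant has inter-shader optimizations in key->opt
+	 * and is compiled asynchronously at low priority (see below). */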
+ shader->is_optimized =
+ !is_pure_monolithic &&
+ memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
+
+ /* If it's an optimized shader, compile it asynchronously. */
+ if (shader->is_optimized &&
+ !is_pure_monolithic &&
+ thread_index < 0) {
+ /* Compile it asynchronously. */
+ util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
+ shader, &shader->ready,
+ si_build_shader_variant_low_priority, NULL);
+
+ /* Add only after the ready fence was reset, to guard against a
+ * race with si_bind_XX_shader. */
+ if (!sel->last_variant) {
+ sel->first_variant = shader;
+ sel->last_variant = shader;
+ } else {
+ sel->last_variant->next_variant = shader;
+ sel->last_variant = shader;
}
+
+ /* Use the default (unoptimized) shader for now. */
+ memset(&key->opt, 0, sizeof(key->opt));
+ mtx_unlock(&sel->mutex);
+ goto again;
}
- si_shader_init_pm4_state(sscreen, shader);
+ /* Reset the fence before adding to the variant list. */
+ util_queue_fence_reset(&shader->ready);
if (!sel->last_variant) {
sel->first_variant = shader;
sel->last_variant->next_variant = shader;
sel->last_variant = shader;
}
- state->current = shader;
- pipe_mutex_unlock(sel->mutex);
- return 0;
+
+ mtx_unlock(&sel->mutex);
+
+ assert(!shader->is_optimized);
+ si_build_shader_variant(shader, thread_index, false);
+
+ util_queue_fence_signal(&shader->ready);
+
+ if (!shader->compilation_failed)
+ state->current = shader;
+
+ return shader->compilation_failed ? -1 : 0;
}
static int si_shader_select(struct pipe_context *ctx,
- struct si_shader_ctx_state *state)
+ struct si_shader_ctx_state *state,
+ struct si_compiler_ctx_state *compiler_state)
{
struct si_context *sctx = (struct si_context *)ctx;
- union si_shader_key key;
+ struct si_shader_key key;
si_shader_selector_key(ctx, state->cso, &key);
- return si_shader_select_with_key(sctx->screen, state, &key,
- sctx->tm, &sctx->b.debug, true,
- sctx->is_debug);
+ return si_shader_select_with_key(sctx->screen, state, compiler_state,
+ &key, -1);
}
static void si_parse_next_shader_property(const struct tgsi_shader_info *info,
- union si_shader_key *key)
+ bool streamout,
+ struct si_shader_key *key)
{
unsigned next_shader = info->properties[TGSI_PROPERTY_NEXT_SHADER];
case PIPE_SHADER_VERTEX:
switch (next_shader) {
case PIPE_SHADER_GEOMETRY:
- key->vs.as_es = 1;
+ key->as_es = 1;
break;
case PIPE_SHADER_TESS_CTRL:
case PIPE_SHADER_TESS_EVAL:
- key->vs.as_ls = 1;
+ key->as_ls = 1;
break;
+ default:
+ /* If POSITION isn't written, it can only be a HW VS
+ * if streamout is used. If streamout isn't used,
+		 * assume that it's a HW LS (i.e. the next shader is TCS).
+ * This heuristic is needed for separate shader objects.
+ */
+ if (!info->writes_position && !streamout)
+ key->as_ls = 1;
}
break;
case PIPE_SHADER_TESS_EVAL:
- if (next_shader == PIPE_SHADER_GEOMETRY)
- key->tes.as_es = 1;
+ if (next_shader == PIPE_SHADER_GEOMETRY ||
+ !info->writes_position)
+ key->as_es = 1;
break;
}
}
* si_shader_selector initialization. Since it can be done asynchronously,
* there is no way to report compile failures to applications.
*/
-void si_init_shader_selector_async(void *job, int thread_index)
+static void si_init_shader_selector_async(void *job, int thread_index)
{
struct si_shader_selector *sel = (struct si_shader_selector *)job;
struct si_screen *sscreen = sel->screen;
LLVMTargetMachineRef tm;
- struct pipe_debug_callback *debug = &sel->debug;
+ struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
unsigned i;
- if (thread_index >= 0) {
- assert(thread_index < ARRAY_SIZE(sscreen->tm));
- tm = sscreen->tm[thread_index];
- if (!debug->async)
- debug = NULL;
- } else {
- tm = sel->tm;
- }
+ assert(!debug->debug_message || debug->async);
+ assert(thread_index >= 0);
+ assert(thread_index < ARRAY_SIZE(sscreen->tm));
+ tm = sscreen->tm[thread_index];
/* Compile the main shader part for use with a prolog and/or epilog.
* If this fails, the driver will try to compile a monolithic shader
* on demand.
*/
- if (sel->type != PIPE_SHADER_GEOMETRY &&
- !sscreen->use_monolithic_shaders) {
+ if (!sscreen->use_monolithic_shaders) {
struct si_shader *shader = CALLOC_STRUCT(si_shader);
- void *tgsi_binary;
+ void *tgsi_binary = NULL;
if (!shader) {
fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
return;
}
+ /* We can leave the fence signaled because use of the default
+ * main part is guarded by the selector's ready fence. */
+ util_queue_fence_init(&shader->ready);
+
shader->selector = sel;
- si_parse_next_shader_property(&sel->info, &shader->key);
+ si_parse_next_shader_property(&sel->info,
+ sel->so.num_outputs != 0,
+ &shader->key);
- tgsi_binary = si_get_tgsi_binary(sel);
+ if (sel->tokens)
+ tgsi_binary = si_get_tgsi_binary(sel);
/* Try to load the shader from the shader cache. */
- pipe_mutex_lock(sscreen->shader_cache_mutex);
+ mtx_lock(&sscreen->shader_cache_mutex);
if (tgsi_binary &&
si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
- FREE(tgsi_binary);
- pipe_mutex_unlock(sscreen->shader_cache_mutex);
+ mtx_unlock(&sscreen->shader_cache_mutex);
} else {
- pipe_mutex_unlock(sscreen->shader_cache_mutex);
+ mtx_unlock(&sscreen->shader_cache_mutex);
/* Compile the shader if it hasn't been loaded from the cache. */
if (si_compile_tgsi_shader(sscreen, tm, shader, false,
}
if (tgsi_binary) {
- pipe_mutex_lock(sscreen->shader_cache_mutex);
- if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader))
+ mtx_lock(&sscreen->shader_cache_mutex);
+ if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader, true))
FREE(tgsi_binary);
- pipe_mutex_unlock(sscreen->shader_cache_mutex);
+ mtx_unlock(&sscreen->shader_cache_mutex);
}
}
- sel->main_shader_part = shader;
+ *si_get_main_shader_part(sel, &shader->key) = shader;
+
+ /* Unset "outputs_written" flags for outputs converted to
+ * DEFAULT_VAL, so that later inter-shader optimizations don't
+ * try to eliminate outputs that don't exist in the final
+ * shader.
+ *
+ * This is only done if non-monolithic shaders are enabled.
+ */
+ if ((sel->type == PIPE_SHADER_VERTEX ||
+ sel->type == PIPE_SHADER_TESS_EVAL) &&
+ !shader->key.as_ls &&
+ !shader->key.as_es) {
+ unsigned i;
+
+ for (i = 0; i < sel->info.num_outputs; i++) {
+ unsigned offset = shader->info.vs_output_param_offset[i];
+
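+			/* Offsets up to 31 are real PARAM exports; anything
+			 * higher means the output is not a real export (e.g.
+			 * it was converted to DEFAULT_VAL). */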
+ if (offset <= AC_EXP_PARAM_OFFSET_31)
+ continue;
+
+ unsigned name = sel->info.output_semantic_name[i];
+ unsigned index = sel->info.output_semantic_index[i];
+ unsigned id;
+
+ switch (name) {
+ case TGSI_SEMANTIC_GENERIC:
+ /* don't process indices the function can't handle */
+ if (index >= SI_MAX_IO_GENERIC)
+ break;
+ /* fall through */
+ default:
+ id = si_shader_io_get_unique_index(name, index);
+ sel->outputs_written &= ~(1ull << id);
+ break;
+ case TGSI_SEMANTIC_POSITION: /* ignore these */
+ case TGSI_SEMANTIC_PSIZE:
+ case TGSI_SEMANTIC_CLIPVERTEX:
+ case TGSI_SEMANTIC_EDGEFLAG:
+ break;
+ }
+ }
+ }
}
/* Pre-compilation. */
- if (sel->type == PIPE_SHADER_GEOMETRY ||
- sscreen->b.debug_flags & DBG_PRECOMPILE) {
+ if (sscreen->b.debug_flags & DBG(PRECOMPILE) &&
+ /* GFX9 needs LS or ES for compilation, which we don't have here. */
+ (sscreen->b.chip_class <= VI ||
+ (sel->type != PIPE_SHADER_TESS_CTRL &&
+ sel->type != PIPE_SHADER_GEOMETRY))) {
struct si_shader_ctx_state state = {sel};
- union si_shader_key key;
+ struct si_shader_key key;
memset(&key, 0, sizeof(key));
- si_parse_next_shader_property(&sel->info, &key);
+ si_parse_next_shader_property(&sel->info,
+ sel->so.num_outputs != 0,
+ &key);
+
+ /* GFX9 doesn't have LS and ES. */
+ if (sscreen->b.chip_class >= GFX9) {
+ key.as_ls = 0;
+ key.as_es = 0;
+ }
/* Set reasonable defaults, so that the shader key doesn't
* cause any code to be eliminated.
*/
switch (sel->type) {
case PIPE_SHADER_TESS_CTRL:
- key.tcs.epilog.prim_mode = PIPE_PRIM_TRIANGLES;
+ key.part.tcs.epilog.prim_mode = PIPE_PRIM_TRIANGLES;
break;
case PIPE_SHADER_FRAGMENT:
- key.ps.prolog.bc_optimize_for_persp =
+ key.part.ps.prolog.bc_optimize_for_persp =
sel->info.uses_persp_center &&
sel->info.uses_persp_centroid;
- key.ps.prolog.bc_optimize_for_linear =
+ key.part.ps.prolog.bc_optimize_for_linear =
sel->info.uses_linear_center &&
sel->info.uses_linear_centroid;
- key.ps.epilog.alpha_func = PIPE_FUNC_ALWAYS;
+ key.part.ps.epilog.alpha_func = PIPE_FUNC_ALWAYS;
for (i = 0; i < 8; i++)
if (sel->info.colors_written & (1 << i))
- key.ps.epilog.spi_shader_col_format |=
+ key.part.ps.epilog.spi_shader_col_format |=
V_028710_SPI_SHADER_FP16_ABGR << (i * 4);
break;
}
- if (si_shader_select_with_key(sscreen, &state, &key, tm, debug,
- false, sel->is_debug_context))
+ if (si_shader_select_with_key(sscreen, &state,
+ &sel->compiler_ctx_state, &key,
+ thread_index))
fprintf(stderr, "radeonsi: can't create a monolithic shader\n");
}
+
+ /* The GS copy shader is always pre-compiled. */
+ if (sel->type == PIPE_SHADER_GEOMETRY) {
+ sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, tm, sel, debug);
+ if (!sel->gs_copy_shader) {
+ fprintf(stderr, "radeonsi: can't create GS copy shader\n");
+ return;
+ }
+
+ si_shader_vs(sscreen, sel->gs_copy_shader, sel);
+ }
+}
+
+/* Return descriptor slot usage masks from the given shader info. */
+void si_get_active_slot_masks(const struct tgsi_shader_info *info,
+ uint32_t *const_and_shader_buffers,
+ uint64_t *samplers_and_images)
+{
+ unsigned start, num_shaderbufs, num_constbufs, num_images, num_samplers;
+
+ num_shaderbufs = util_last_bit(info->shader_buffers_declared);
+ num_constbufs = util_last_bit(info->const_buffers_declared);
+	/* two 8-dword image descriptors share one 16-dword slot */
+ num_images = align(util_last_bit(info->images_declared), 2);
+ num_samplers = util_last_bit(info->samplers_declared);
+
+ /* The layout is: sb[last] ... sb[0], cb[0] ... cb[last] */
+ start = si_get_shaderbuf_slot(num_shaderbufs - 1);
+ *const_and_shader_buffers =
+ u_bit_consecutive(start, num_shaderbufs + num_constbufs);
+
+ /* The layout is: image[last] ... image[0], sampler[0] ... sampler[last] */
+ start = si_get_image_slot(num_images - 1) / 2;
+ *samplers_and_images =
+ u_bit_consecutive64(start, num_images / 2 + num_samplers);
}
static void *si_create_shader_selector(struct pipe_context *ctx,
if (!sel)
return NULL;
+ pipe_reference_init(&sel->reference, 1);
sel->screen = sscreen;
- sel->tm = sctx->tm;
- sel->debug = sctx->b.debug;
- sel->is_debug_context = sctx->is_debug;
- sel->tokens = tgsi_dup_tokens(state->tokens);
- if (!sel->tokens) {
- FREE(sel);
- return NULL;
- }
+ sel->compiler_ctx_state.debug = sctx->debug;
+ sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
sel->so = state->stream_output;
- tgsi_scan_shader(state->tokens, &sel->info);
+
+ if (state->type == PIPE_SHADER_IR_TGSI) {
+ sel->tokens = tgsi_dup_tokens(state->tokens);
+ if (!sel->tokens) {
+ FREE(sel);
+ return NULL;
+ }
+
+ tgsi_scan_shader(state->tokens, &sel->info);
+ tgsi_scan_tess_ctrl(state->tokens, &sel->info, &sel->tcs_info);
+ } else {
+ assert(state->type == PIPE_SHADER_IR_NIR);
+
+ sel->nir = state->ir.nir;
+
+ si_nir_scan_shader(sel->nir, &sel->info);
+
+ si_lower_nir(sel);
+ }
+
sel->type = sel->info.processor;
p_atomic_inc(&sscreen->b.num_shaders_created);
+ si_get_active_slot_masks(&sel->info,
+ &sel->active_const_and_shader_buffers,
+ &sel->active_samplers_and_images);
+
+ /* Record which streamout buffers are enabled. */
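+ /* Bit layout: bit (stream * 4 + buffer), i.e. 4 buffer bits per stream. */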
+ for (i = 0; i < sel->so.num_outputs; i++) {
+ sel->enabled_streamout_buffer_mask |=
+ (1 << sel->so.output[i].output_buffer) <<
+ (sel->so.output[i].stream * 4);
+ }
+
+ /* The prolog is a no-op if there are no inputs. */
+ sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX &&
+ sel->info.num_inputs &&
+ !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
+
+ sel->force_correct_derivs_after_kill =
+ sel->type == PIPE_SHADER_FRAGMENT &&
+ sel->info.uses_derivatives &&
+ sel->info.uses_kill &&
+ sctx->screen->b.debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL);
/* Set which opcode uses which (i,j) pair. */
if (sel->info.uses_persp_opcode_interp_centroid)
case PIPE_SHADER_TESS_CTRL:
/* Always reserve space for these. */
sel->patch_outputs_written |=
- (1llu << si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSINNER, 0)) |
- (1llu << si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSOUTER, 0));
+ (1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0)) |
+ (1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0));
/* fall through */
case PIPE_SHADER_VERTEX:
case PIPE_SHADER_TESS_EVAL:
case TGSI_SEMANTIC_TESSOUTER:
case TGSI_SEMANTIC_PATCH:
sel->patch_outputs_written |=
- 1llu << si_shader_io_get_unique_index(name, index);
+ 1ull << si_shader_io_get_unique_index_patch(name, index);
break;
+
+ case TGSI_SEMANTIC_GENERIC:
+ /* don't process indices the function can't handle */
+ if (index >= SI_MAX_IO_GENERIC)
+ break;
+ /* fall through */
default:
sel->outputs_written |=
- 1llu << si_shader_io_get_unique_index(name, index);
+ 1ull << si_shader_io_get_unique_index(name, index);
+ break;
+ case TGSI_SEMANTIC_CLIPVERTEX: /* ignore these */
+ case TGSI_SEMANTIC_EDGEFLAG:
+ break;
}
}
sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;
+
+ /* For the ESGS ring in LDS, add 1 dword to reduce LDS bank
+ * conflicts, i.e. each vertex will start at a different bank.
+ */
+ if (sctx->b.chip_class >= GFX9)
+ sel->esgs_itemsize += 4;
break;
case PIPE_SHADER_FRAGMENT:
+ for (i = 0; i < sel->info.num_inputs; i++) {
+ unsigned name = sel->info.input_semantic_name[i];
+ unsigned index = sel->info.input_semantic_index[i];
+
+ switch (name) {
+ case TGSI_SEMANTIC_GENERIC:
+ /* don't process indices the function can't handle */
+ if (index >= SI_MAX_IO_GENERIC)
+ break;
+ /* fall through */
+ default:
+ sel->inputs_read |=
+ 1ull << si_shader_io_get_unique_index(name, index);
+ break;
+ case TGSI_SEMANTIC_PCOORD: /* ignore this */
+ break;
+ }
+ }
+
for (i = 0; i < 8; i++)
if (sel->info.colors_written & (1 << i))
sel->colors_written_4bit |= 0xf << (4 * i);
break;
}
+ /* PA_CL_VS_OUT_CNTL */
+ bool misc_vec_ena =
+ sel->info.writes_psize || sel->info.writes_edgeflag ||
+ sel->info.writes_layer || sel->info.writes_viewport_index;
+ sel->pa_cl_vs_out_cntl =
+ S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) |
+ S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag) |
+ S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) |
+ S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) |
+ S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
+ S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena);
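+ /* Writing CLIPVERTEX means all 6 user clip planes may apply (SIX_BITS);
+ * cull distances are packed right after the written clip distances. */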
+ sel->clipdist_mask = sel->info.writes_clipvertex ?
+ SIX_BITS : sel->info.clipdist_writemask;
+ sel->culldist_mask = sel->info.culldist_writemask <<
+ sel->info.num_written_clipdistance;
+
/* DB_SHADER_CONTROL */
sel->db_shader_control =
S_02880C_Z_EXPORT_ENABLE(sel->info.writes_z) |
sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
}
- pipe_mutex_init(sel->mutex);
+ (void) mtx_init(&sel->mutex, mtx_plain);
util_queue_fence_init(&sel->ready);
- if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
- sctx->is_debug ||
- r600_can_dump_shader(&sscreen->b, sel->info.processor) ||
- !util_queue_is_initialized(&sscreen->shader_compiler_queue))
- si_init_shader_selector_async(sel, -1);
- else
- util_queue_add_job(&sscreen->shader_compiler_queue, sel,
- &sel->ready, si_init_shader_selector_async,
- NULL);
+ struct util_async_debug_callback async_debug;
+ bool wait =
+ (sctx->debug.debug_message && !sctx->debug.async) ||
+ sctx->is_debug ||
+ si_can_dump_shader(sscreen, sel->info.processor);
+
+ if (wait) {
+ u_async_debug_init(&async_debug);
+ sel->compiler_ctx_state.debug = async_debug.base;
+ }
+
+ util_queue_add_job(&sscreen->shader_compiler_queue, sel,
+ &sel->ready, si_init_shader_selector_async,
+ NULL);
+
+ if (wait) {
+ util_queue_fence_wait(&sel->ready);
+ u_async_debug_drain(&async_debug, &sctx->debug);
+ u_async_debug_cleanup(&async_debug);
+ }
return sel;
}
+static void si_update_streamout_state(struct si_context *sctx)
+{
+ struct si_shader_selector *shader_with_so = si_get_vs(sctx)->cso;
+
+ if (!shader_with_so)
+ return;
+
+ sctx->streamout.enabled_stream_buffers_mask =
+ shader_with_so->enabled_streamout_buffer_mask;
+ sctx->streamout.stride_in_dw = shader_with_so->so.stride;
+}
+
+static void si_update_clip_regs(struct si_context *sctx,
+ struct si_shader_selector *old_hw_vs,
+ struct si_shader *old_hw_vs_variant,
+ struct si_shader_selector *next_hw_vs,
+ struct si_shader *next_hw_vs_variant)
+{
+ if (next_hw_vs &&
+ (!old_hw_vs ||
+ old_hw_vs->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] !=
+ next_hw_vs->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] ||
+ old_hw_vs->pa_cl_vs_out_cntl != next_hw_vs->pa_cl_vs_out_cntl ||
+ old_hw_vs->clipdist_mask != next_hw_vs->clipdist_mask ||
+ old_hw_vs->culldist_mask != next_hw_vs->culldist_mask ||
+ !old_hw_vs_variant ||
+ !next_hw_vs_variant ||
+ old_hw_vs_variant->key.opt.clip_disable !=
+ next_hw_vs_variant->key.opt.clip_disable))
+ si_mark_atom_dirty(sctx, &sctx->clip_regs);
+}
+
+static void si_update_common_shader_state(struct si_context *sctx)
+{
+ sctx->uses_bindless_samplers =
+ si_shader_uses_bindless_samplers(sctx->vs_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->gs_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->ps_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->tcs_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->tes_shader.cso);
+ sctx->uses_bindless_images =
+ si_shader_uses_bindless_images(sctx->vs_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->gs_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->ps_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->tcs_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->tes_shader.cso);
+ sctx->do_update_shaders = true;
+}
+
static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
+ struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
struct si_shader_selector *sel = state;
if (sctx->vs_shader.cso == sel)
sctx->vs_shader.cso = sel;
sctx->vs_shader.current = sel ? sel->first_variant : NULL;
- sctx->do_update_shaders = true;
- si_mark_atom_dirty(sctx, &sctx->clip_regs);
- r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
+ sctx->num_vs_blit_sgprs = sel ? sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS] : 0;
+
+ si_update_common_shader_state(sctx);
+ si_update_vs_viewport_state(sctx);
+ si_set_active_descriptors_for_shader(sctx, sel);
+ si_update_streamout_state(sctx);
+ si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
+ si_get_vs(sctx)->cso, si_get_vs_state(sctx));
+}
+
+static void si_update_tess_uses_prim_id(struct si_context *sctx)
+{
+ sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id =
+ (sctx->tes_shader.cso &&
+ sctx->tes_shader.cso->info.uses_primid) ||
+ (sctx->tcs_shader.cso &&
+ sctx->tcs_shader.cso->info.uses_primid) ||
+ (sctx->gs_shader.cso &&
+ sctx->gs_shader.cso->info.uses_primid) ||
+ (sctx->ps_shader.cso && !sctx->gs_shader.cso &&
+ sctx->ps_shader.cso->info.uses_primid);
}
static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
+ struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
struct si_shader_selector *sel = state;
bool enable_changed = !!sctx->gs_shader.cso != !!sel;
sctx->gs_shader.cso = sel;
sctx->gs_shader.current = sel ? sel->first_variant : NULL;
- sctx->do_update_shaders = true;
- si_mark_atom_dirty(sctx, &sctx->clip_regs);
+ sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL;
+
+ si_update_common_shader_state(sctx);
sctx->last_rast_prim = -1; /* reset this so that it gets updated */
- if (enable_changed)
+ if (enable_changed) {
si_shader_change_notify(sctx);
- r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
+ if (sctx->ia_multi_vgt_param_key.u.uses_tess)
+ si_update_tess_uses_prim_id(sctx);
+ }
+ si_update_vs_viewport_state(sctx);
+ si_set_active_descriptors_for_shader(sctx, sel);
+ si_update_streamout_state(sctx);
+ si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
+ si_get_vs(sctx)->cso, si_get_vs_state(sctx));
}
static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
sctx->tcs_shader.cso = sel;
sctx->tcs_shader.current = sel ? sel->first_variant : NULL;
- sctx->do_update_shaders = true;
+ si_update_tess_uses_prim_id(sctx);
+
+ si_update_common_shader_state(sctx);
if (enable_changed)
sctx->last_tcs = NULL; /* invalidate derived tess state */
+
+ si_set_active_descriptors_for_shader(sctx, sel);
}
static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
+ struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
struct si_shader_selector *sel = state;
bool enable_changed = !!sctx->tes_shader.cso != !!sel;
sctx->tes_shader.cso = sel;
sctx->tes_shader.current = sel ? sel->first_variant : NULL;
- sctx->do_update_shaders = true;
- si_mark_atom_dirty(sctx, &sctx->clip_regs);
+ sctx->ia_multi_vgt_param_key.u.uses_tess = sel != NULL;
+ si_update_tess_uses_prim_id(sctx);
+
+ si_update_common_shader_state(sctx);
sctx->last_rast_prim = -1; /* reset this so that it gets updated */
if (enable_changed) {
si_shader_change_notify(sctx);
sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
}
- r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
+ si_update_vs_viewport_state(sctx);
+ si_set_active_descriptors_for_shader(sctx, sel);
+ si_update_streamout_state(sctx);
+ si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
+ si_get_vs(sctx)->cso, si_get_vs_state(sctx));
}
static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_shader_selector *old_sel = sctx->ps_shader.cso;
struct si_shader_selector *sel = state;
/* skip if supplied shader is one already in use */
- if (sctx->ps_shader.cso == sel)
+ if (old_sel == sel)
return;
sctx->ps_shader.cso = sel;
sctx->ps_shader.current = sel ? sel->first_variant : NULL;
- sctx->do_update_shaders = true;
- si_mark_atom_dirty(sctx, &sctx->cb_render_state);
+
+ si_update_common_shader_state(sctx);
+ if (sel) {
+ if (sctx->ia_multi_vgt_param_key.u.uses_tess)
+ si_update_tess_uses_prim_id(sctx);
+
+ if (!old_sel ||
+ old_sel->info.colors_written != sel->info.colors_written)
+ si_mark_atom_dirty(sctx, &sctx->cb_render_state);
+
+ if (sctx->screen->has_out_of_order_rast &&
+ (!old_sel ||
+ old_sel->info.writes_memory != sel->info.writes_memory ||
+ old_sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] !=
+ sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]))
+ si_mark_atom_dirty(sctx, &sctx->msaa_config);
+ }
+ si_set_active_descriptors_for_shader(sctx, sel);
}
static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
{
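+ /* An optimized variant may still be in the low-priority compile queue;
+ * drop the pending job (or wait for it if it already started) before
+ * tearing the shader down. */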
+ if (shader->is_optimized) {
+ util_queue_drop_job(&sctx->screen->shader_compiler_queue_low_priority,
+ &shader->ready);
+ }
+
+ util_queue_fence_destroy(&shader->ready);
+
if (shader->pm4) {
switch (shader->selector->type) {
case PIPE_SHADER_VERTEX:
- if (shader->key.vs.as_ls)
+ if (shader->key.as_ls) {
+ assert(sctx->b.chip_class <= VI);
si_pm4_delete_state(sctx, ls, shader->pm4);
- else if (shader->key.vs.as_es)
+ } else if (shader->key.as_es) {
+ assert(sctx->b.chip_class <= VI);
si_pm4_delete_state(sctx, es, shader->pm4);
- else
+ } else {
si_pm4_delete_state(sctx, vs, shader->pm4);
+ }
break;
case PIPE_SHADER_TESS_CTRL:
si_pm4_delete_state(sctx, hs, shader->pm4);
break;
case PIPE_SHADER_TESS_EVAL:
- if (shader->key.tes.as_es)
+ if (shader->key.as_es) {
+ assert(sctx->b.chip_class <= VI);
si_pm4_delete_state(sctx, es, shader->pm4);
- else
+ } else {
si_pm4_delete_state(sctx, vs, shader->pm4);
+ }
break;
case PIPE_SHADER_GEOMETRY:
- si_pm4_delete_state(sctx, gs, shader->pm4);
- si_pm4_delete_state(sctx, vs, shader->gs_copy_shader->pm4);
+ if (shader->is_gs_copy_shader)
+ si_pm4_delete_state(sctx, vs, shader->pm4);
+ else
+ si_pm4_delete_state(sctx, gs, shader->pm4);
break;
case PIPE_SHADER_FRAGMENT:
si_pm4_delete_state(sctx, ps, shader->pm4);
}
}
+ si_shader_selector_reference(sctx, &shader->previous_stage_sel, NULL);
si_shader_destroy(shader);
free(shader);
}
-static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
+void si_destroy_shader_selector(struct si_context *sctx,
+ struct si_shader_selector *sel)
{
- struct si_context *sctx = (struct si_context *)ctx;
- struct si_shader_selector *sel = (struct si_shader_selector *)state;
struct si_shader *p = sel->first_variant, *c;
struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
[PIPE_SHADER_VERTEX] = &sctx->vs_shader,
[PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
};
- util_queue_job_wait(&sel->ready);
+ util_queue_drop_job(&sctx->screen->shader_compiler_queue, &sel->ready);
if (current_shader[sel->type]->cso == sel) {
current_shader[sel->type]->cso = NULL;
if (sel->main_shader_part)
si_delete_shader(sctx, sel->main_shader_part);
+ if (sel->main_shader_part_ls)
+ si_delete_shader(sctx, sel->main_shader_part_ls);
+ if (sel->main_shader_part_es)
+ si_delete_shader(sctx, sel->main_shader_part_es);
+ if (sel->gs_copy_shader)
+ si_delete_shader(sctx, sel->gs_copy_shader);
util_queue_fence_destroy(&sel->ready);
- pipe_mutex_destroy(sel->mutex);
+ mtx_destroy(&sel->mutex);
free(sel->tokens);
+ ralloc_free(sel->nir);
free(sel);
}
+static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_shader_selector *sel = (struct si_shader_selector *)state;
+
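+ /* This only drops the context's reference. Merged shaders may still
+ * hold references (e.g. previous_stage_sel), so the selector is freed
+ * only when the last reference goes away. */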
+ si_shader_selector_reference(sctx, &sel, NULL);
+}
+
static unsigned si_get_ps_input_cntl(struct si_context *sctx,
struct si_shader *vs, unsigned name,
unsigned index, unsigned interpolate)
index == vsinfo->output_semantic_index[j]) {
offset = vs->info.vs_output_param_offset[j];
- if (offset <= EXP_PARAM_OFFSET_31) {
+ if (offset <= AC_EXP_PARAM_OFFSET_31) {
/* The input is loaded from parameter memory. */
ps_input_cntl |= S_028644_OFFSET(offset);
} else if (!G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
- /* The input is a DEFAULT_VAL constant. */
- assert(offset >= EXP_PARAM_DEFAULT_VAL_0000 &&
- offset <= EXP_PARAM_DEFAULT_VAL_1111);
+ if (offset == AC_EXP_PARAM_UNDEFINED) {
+ /* This can happen with depth-only rendering. */
+ offset = 0;
+ } else {
+ /* The input is a DEFAULT_VAL constant. */
+ assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
+ offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
+ offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
+ }
- offset -= EXP_PARAM_DEFAULT_VAL_0000;
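+ /* OFFSET == 0x20 appears to be the encoding that makes the hw use
+ * DEFAULT_VAL instead of reading a parameter slot (assumption based
+ * on the S_028644_DEFAULT_VAL field). */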
ps_input_cntl = S_028644_OFFSET(0x20) |
S_028644_DEFAULT_VAL(offset);
}
}
}
- if (ps->key.ps.prolog.color_two_side) {
+ if (ps->key.part.ps.prolog.color_two_side) {
unsigned bcol = TGSI_SEMANTIC_BCOLOR;
for (i = 0; i < 2; i++) {
unsigned num_se = sctx->screen->b.info.max_se;
unsigned wave_size = 64;
unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
- unsigned gs_vertex_reuse = 16 * num_se; /* GS_VERTEX_REUSE register (per SE) */
+ /* On SI-CI, the value comes from VGT_GS_VERTEX_REUSE = 16.
+ * On VI+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
+ */
+ unsigned gs_vertex_reuse = (sctx->b.chip_class >= VI ? 32 : 16) * num_se;
unsigned alignment = 256 * num_se;
/* The maximum size is 63.999 MB per SE. */
unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
es->esgs_itemsize * gs->gs_input_verts_per_prim;
unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
- gs->max_gsvs_emit_size * (gs->max_gs_stream + 1);
+ gs->max_gsvs_emit_size;
min_esgs_ring_size = align(min_esgs_ring_size, alignment);
esgs_ring_size = align(esgs_ring_size, alignment);
/* Some rings don't have to be allocated if shaders don't use them.
* (e.g. no varyings between ES and GS or GS and VS)
+ *
+ * GFX9 doesn't have the ESGS ring.
*/
- bool update_esgs = esgs_ring_size &&
+ bool update_esgs = sctx->b.chip_class <= VI &&
+ esgs_ring_size &&
(!sctx->esgs_ring ||
sctx->esgs_ring->width0 < esgs_ring_size);
bool update_gsvs = gsvs_ring_size &&
if (update_esgs) {
pipe_resource_reference(&sctx->esgs_ring, NULL);
- sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, 0,
- PIPE_USAGE_DEFAULT,
- esgs_ring_size);
+ sctx->esgs_ring =
+ si_aligned_buffer_create(sctx->b.b.screen,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ esgs_ring_size, alignment);
if (!sctx->esgs_ring)
return false;
}
if (update_gsvs) {
pipe_resource_reference(&sctx->gsvs_ring, NULL);
- sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, 0,
- PIPE_USAGE_DEFAULT,
- gsvs_ring_size);
+ sctx->gsvs_ring =
+ si_aligned_buffer_create(sctx->b.b.screen,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ gsvs_ring_size, alignment);
if (!sctx->gsvs_ring)
return false;
}
return false;
if (sctx->b.chip_class >= CIK) {
- if (sctx->esgs_ring)
+ if (sctx->esgs_ring) {
+ assert(sctx->b.chip_class <= VI);
si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
sctx->esgs_ring->width0 / 256);
+ }
if (sctx->gsvs_ring)
si_pm4_set_reg(pm4, R_030904_VGT_GSVS_RING_SIZE,
sctx->gsvs_ring->width0 / 256);
/* Set ring bindings. */
if (sctx->esgs_ring) {
+ assert(sctx->b.chip_class <= VI);
si_set_ring_buffer(&sctx->b.b, SI_ES_RING_ESGS,
sctx->esgs_ring, 0, sctx->esgs_ring->width0,
true, true, 4, 64, 0);
sctx->esgs_ring, 0, sctx->esgs_ring->width0,
false, false, 0, 0, 0);
}
- if (sctx->gsvs_ring)
- si_set_ring_buffer(&sctx->b.b, SI_VS_RING_GSVS,
+ if (sctx->gsvs_ring) {
+ si_set_ring_buffer(&sctx->b.b, SI_RING_GSVS,
sctx->gsvs_ring, 0, sctx->gsvs_ring->width0,
false, false, 0, 0, 0);
+ }
+
return true;
}
-static void si_update_gsvs_ring_bindings(struct si_context *sctx)
+static void si_shader_lock(struct si_shader *shader)
{
- unsigned gsvs_itemsize = sctx->gs_shader.cso->max_gsvs_emit_size;
- uint64_t offset;
-
- if (!sctx->gsvs_ring || gsvs_itemsize == sctx->last_gsvs_itemsize)
- return;
-
- sctx->last_gsvs_itemsize = gsvs_itemsize;
-
- si_set_ring_buffer(&sctx->b.b, SI_GS_RING_GSVS0,
- sctx->gsvs_ring, gsvs_itemsize,
- 64, true, true, 4, 16, 0);
-
- offset = gsvs_itemsize * 64;
- si_set_ring_buffer(&sctx->b.b, SI_GS_RING_GSVS1,
- sctx->gsvs_ring, gsvs_itemsize,
- 64, true, true, 4, 16, offset);
-
- offset = (gsvs_itemsize * 2) * 64;
- si_set_ring_buffer(&sctx->b.b, SI_GS_RING_GSVS2,
- sctx->gsvs_ring, gsvs_itemsize,
- 64, true, true, 4, 16, offset);
+ mtx_lock(&shader->selector->mutex);
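+ /* Lock order: main selector first, then the previous stage. The
+ * previous stage is always an earlier pipeline stage, so this nested
+ * order should be consistent across callers (assumption). */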
+ if (shader->previous_stage_sel) {
+ assert(shader->previous_stage_sel != shader->selector);
+ mtx_lock(&shader->previous_stage_sel->mutex);
+ }
+}
- offset = (gsvs_itemsize * 3) * 64;
- si_set_ring_buffer(&sctx->b.b, SI_GS_RING_GSVS3,
- sctx->gsvs_ring, gsvs_itemsize,
- 64, true, true, 4, 16, offset);
+static void si_shader_unlock(struct si_shader *shader)
+{
+ if (shader->previous_stage_sel)
+ mtx_unlock(&shader->previous_stage_sel->mutex);
+ mtx_unlock(&shader->selector->mutex);
}
/**
if (shader->config.scratch_bytes_per_wave == 0)
return 0;
+ /* Prevent race conditions when updating:
+ * - si_shader::scratch_bo
+ * - si_shader::binary::code
+ * - si_shader::previous_stage::binary::code.
+ */
+ si_shader_lock(shader);
+
/* This shader is already configured to use the current
* scratch buffer. */
- if (shader->scratch_bo == sctx->scratch_buffer)
+ if (shader->scratch_bo == sctx->scratch_buffer) {
+ si_shader_unlock(shader);
return 0;
+ }
assert(sctx->scratch_buffer);
- si_shader_apply_scratch_relocs(sctx, shader, &shader->config, scratch_va);
+ if (shader->previous_stage)
+ si_shader_apply_scratch_relocs(shader->previous_stage, scratch_va);
+
+ si_shader_apply_scratch_relocs(shader, scratch_va);
/* Replace the shader bo with a new bo that has the relocs applied. */
r = si_shader_binary_upload(sctx->screen, shader);
- if (r)
+ if (r) {
+ si_shader_unlock(shader);
return r;
+ }
/* Update the shader state to use the new shader bo. */
si_shader_init_pm4_state(sctx->screen, shader);
r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);
+ si_shader_unlock(shader);
return 1;
}
return shader ? shader->config.scratch_bytes_per_wave : 0;
}
+static struct si_shader *si_get_tcs_current(struct si_context *sctx)
+{
+ if (!sctx->tes_shader.cso)
+ return NULL; /* tessellation disabled */
+
+ return sctx->tcs_shader.cso ? sctx->tcs_shader.current :
+ sctx->fixed_func_tcs_shader.current;
+}
+
static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
unsigned bytes = 0;
bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tcs_shader.current));
bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
+
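+ /* The TCS may be the application's shader or the fixed-function one;
+ * si_get_tcs_current() returns whichever is active. */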
+ if (sctx->tes_shader.cso) {
+ struct si_shader *tcs = si_get_tcs_current(sctx);
+
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(tcs));
+ }
return bytes;
}
+static bool si_update_scratch_relocs(struct si_context *sctx)
+{
+ struct si_shader *tcs = si_get_tcs_current(sctx);
+ int r;
+
+ /* Update the shaders so that they use the latest scratch buffer.
+ * The scratch buffer may have changed since these shaders were last
+ * used, so we still need to try to update them even if they require
+ * less scratch than the current buffer provides.
+ */
+ r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
+ if (r < 0)
+ return false;
+ if (r == 1)
+ si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);
+
+ r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
+ if (r < 0)
+ return false;
+ if (r == 1)
+ si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
+
+ r = si_update_scratch_buffer(sctx, tcs);
+ if (r < 0)
+ return false;
+ if (r == 1)
+ si_pm4_bind_state(sctx, hs, tcs->pm4);
+
+ /* VS can be bound as LS, ES, or VS. */
+ r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
+ if (r < 0)
+ return false;
+ if (r == 1) {
+ if (sctx->tes_shader.current)
+ si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
+ else if (sctx->gs_shader.current)
+ si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
+ else
+ si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
+ }
+
+ /* TES can be bound as ES or VS. */
+ r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
+ if (r < 0)
+ return false;
+ if (r == 1) {
+ if (sctx->gs_shader.current)
+ si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
+ else
+ si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
+ }
+
+ return true;
+}
+
static bool si_update_spi_tmpring_size(struct si_context *sctx)
{
unsigned current_scratch_buffer_size =
unsigned scratch_needed_size = scratch_bytes_per_wave *
sctx->scratch_waves;
unsigned spi_tmpring_size;
- int r;
if (scratch_needed_size > 0) {
if (scratch_needed_size > current_scratch_buffer_size) {
r600_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer = (struct r600_resource*)
- pipe_buffer_create(&sctx->screen->b.b, 0,
- PIPE_USAGE_DEFAULT, scratch_needed_size);
+ si_aligned_buffer_create(&sctx->screen->b.b,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ scratch_needed_size, 256);
if (!sctx->scratch_buffer)
return false;
- sctx->emit_scratch_reloc = true;
- }
-
- /* Update the shaders, so they are using the latest scratch. The
- * scratch buffer may have been changed since these shaders were
- * last used, so we still need to try to update them, even if
- * they require scratch buffers smaller than the current size.
- */
- r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
- if (r < 0)
- return false;
- if (r == 1)
- si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);
-
- r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
- if (r < 0)
- return false;
- if (r == 1)
- si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
- r = si_update_scratch_buffer(sctx, sctx->tcs_shader.current);
- if (r < 0)
- return false;
- if (r == 1)
- si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
-
- /* VS can be bound as LS, ES, or VS. */
- r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
- if (r < 0)
- return false;
- if (r == 1) {
- if (sctx->tes_shader.current)
- si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
- else if (sctx->gs_shader.current)
- si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
- else
- si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
+ si_mark_atom_dirty(sctx, &sctx->scratch_state);
+ si_context_add_resource_size(&sctx->b.b,
+ &sctx->scratch_buffer->b.b);
}
- /* TES can be bound as ES or VS. */
- r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
- if (r < 0)
+ if (!si_update_scratch_relocs(sctx))
return false;
- if (r == 1) {
- if (sctx->gs_shader.current)
- si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
- else
- si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
- }
}
/* The LLVM shader backend should be reporting aligned scratch_sizes. */
S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
if (spi_tmpring_size != sctx->spi_tmpring_size) {
sctx->spi_tmpring_size = spi_tmpring_size;
- sctx->emit_scratch_reloc = true;
+ si_mark_atom_dirty(sctx, &sctx->scratch_state);
}
return true;
}
static void si_init_tess_factor_ring(struct si_context *sctx)
{
- bool double_offchip_buffers = sctx->b.chip_class >= CIK;
- unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
+ bool double_offchip_buffers = sctx->b.chip_class >= CIK &&
+ sctx->b.family != CHIP_CARRIZO &&
+ sctx->b.family != CHIP_STONEY;
+ /* This must be one less than the maximum number due to a hw limitation.
+ * Various hardware bugs on SI, CIK, and GFX9 require it.
+ */
+ unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 127 : 63;
unsigned max_offchip_buffers = max_offchip_buffers_per_se *
sctx->screen->b.info.max_se;
unsigned offchip_granularity;
break;
}
- switch (sctx->b.chip_class) {
- case SI:
- max_offchip_buffers = MIN2(max_offchip_buffers, 126);
- break;
- case CIK:
- max_offchip_buffers = MIN2(max_offchip_buffers, 508);
- break;
- case VI:
- default:
- max_offchip_buffers = MIN2(max_offchip_buffers, 512);
- break;
- }
-
assert(!sctx->tf_ring);
- sctx->tf_ring = pipe_buffer_create(sctx->b.b.screen, 0,
- PIPE_USAGE_DEFAULT,
- 32768 * sctx->screen->b.info.max_se);
+ /* Use 64K alignment for both rings, so that we can pass the address
+ * to shaders as one SGPR containing bits [16:47].
+ */
+ sctx->tf_ring = si_aligned_buffer_create(sctx->b.b.screen,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ 32768 * sctx->screen->b.info.max_se,
+ 64 * 1024);
if (!sctx->tf_ring)
return;
assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);
- sctx->tess_offchip_ring = pipe_buffer_create(sctx->b.b.screen, 0,
- PIPE_USAGE_DEFAULT,
- max_offchip_buffers *
- sctx->screen->tess_offchip_block_dw_size * 4);
+ sctx->tess_offchip_ring =
+ si_aligned_buffer_create(sctx->b.b.screen,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ max_offchip_buffers *
+ sctx->screen->tess_offchip_block_dw_size * 4,
+ 64 * 1024);
if (!sctx->tess_offchip_ring)
return;
si_init_config_add_vgt_flush(sctx);
+ uint64_t offchip_va = r600_resource(sctx->tess_offchip_ring)->gpu_address;
+ uint64_t factor_va = r600_resource(sctx->tf_ring)->gpu_address;
+ assert((offchip_va & 0xffff) == 0);
+ assert((factor_va & 0xffff) == 0);
+
+ si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tess_offchip_ring),
+ RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
+ si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tf_ring),
+ RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
+
/* Append these registers to the init config state. */
if (sctx->b.chip_class >= CIK) {
if (sctx->b.chip_class >= VI)
si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
S_030938_SIZE(sctx->tf_ring->width0 / 4));
si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
- r600_resource(sctx->tf_ring)->gpu_address >> 8);
+ factor_va >> 8);
+ if (sctx->b.chip_class >= GFX9)
+ si_pm4_set_reg(sctx->init_config, R_030944_VGT_TF_MEMORY_BASE_HI,
+ factor_va >> 40);
si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
S_03093C_OFFCHIP_GRANULARITY(offchip_granularity));
si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
S_008988_SIZE(sctx->tf_ring->width0 / 4));
si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
- r600_resource(sctx->tf_ring)->gpu_address >> 8);
+ factor_va >> 8);
si_pm4_set_reg(sctx->init_config, R_0089B0_VGT_HS_OFFCHIP_PARAM,
S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers));
}
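+
+ /* Both rings are 64K-aligned (see the buffer creation above), so each
+ * base address fits into one SGPR as VA bits [16:47], hence the >> 16. */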
+ if (sctx->b.chip_class >= GFX9) {
+ si_pm4_set_reg(sctx->init_config,
+ R_00B430_SPI_SHADER_USER_DATA_LS_0 +
+ GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K * 4,
+ offchip_va >> 16);
+ si_pm4_set_reg(sctx->init_config,
+ R_00B430_SPI_SHADER_USER_DATA_LS_0 +
+ GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K * 4,
+ factor_va >> 16);
+ } else {
+ si_pm4_set_reg(sctx->init_config,
+ R_00B430_SPI_SHADER_USER_DATA_HS_0 +
+ GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K * 4,
+ offchip_va >> 16);
+ si_pm4_set_reg(sctx->init_config,
+ R_00B430_SPI_SHADER_USER_DATA_HS_0 +
+ GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K * 4,
+ factor_va >> 16);
+ }
+
/* Flush the context to re-emit the init_config state.
* This is done only once in a lifetime of a context.
*/
si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
sctx->b.initial_gfx_cs_size = 0; /* force flush */
si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
-
- si_set_ring_buffer(&sctx->b.b, SI_HS_RING_TESS_FACTOR, sctx->tf_ring,
- 0, sctx->tf_ring->width0, false, false, 0, 0, 0);
-
- si_set_ring_buffer(&sctx->b.b, SI_HS_RING_TESS_OFFCHIP,
- sctx->tess_offchip_ring, 0,
- sctx->tess_offchip_ring->width0, false, false, 0, 0, 0);
}
/**
S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
}
+ if (sctx->b.chip_class >= GFX9)
+ stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
+
si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
}
si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}
-static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
-{
- struct pipe_stream_output_info *so = &shader->so;
- uint32_t enabled_stream_buffers_mask = 0;
- int i;
-
- for (i = 0; i < so->num_outputs; i++)
- enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << (so->output[i].stream * 4);
- sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
- sctx->b.streamout.stride_in_dw = shader->so.stride;
-}
-
bool si_update_shaders(struct si_context *sctx)
{
struct pipe_context *ctx = (struct pipe_context*)sctx;
+ struct si_compiler_ctx_state compiler_state;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
+ struct si_shader *old_vs = si_get_vs_state(sctx);
+ bool old_clip_disable = old_vs ? old_vs->key.opt.clip_disable : false;
+ struct si_shader *old_ps = sctx->ps_shader.current;
+ unsigned old_spi_shader_col_format =
+ old_ps ? old_ps->key.part.ps.epilog.spi_shader_col_format : 0;
int r;
+ compiler_state.tm = sctx->tm;
+ compiler_state.debug = sctx->debug;
+ compiler_state.is_debug_context = sctx->is_debug;
+
/* Update stages before GS. */
if (sctx->tes_shader.cso) {
if (!sctx->tf_ring) {
}
/* VS as LS */
- r = si_shader_select(ctx, &sctx->vs_shader);
- if (r)
- return false;
- si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
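+ /* On GFX9, LS is merged into HS and ES into GS, so the separate LS/ES
+ * hardware stages only exist on VI and older. */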
+ if (sctx->b.chip_class <= VI) {
+ r = si_shader_select(ctx, &sctx->vs_shader,
+ &compiler_state);
+ if (r)
+ return false;
+ si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
+ }
if (sctx->tcs_shader.cso) {
- r = si_shader_select(ctx, &sctx->tcs_shader);
+ r = si_shader_select(ctx, &sctx->tcs_shader,
+ &compiler_state);
if (r)
return false;
si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
return false;
}
- r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader);
+ r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader,
+ &compiler_state);
if (r)
return false;
si_pm4_bind_state(sctx, hs,
sctx->fixed_func_tcs_shader.current->pm4);
}
- r = si_shader_select(ctx, &sctx->tes_shader);
- if (r)
- return false;
-
if (sctx->gs_shader.cso) {
/* TES as ES */
- si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
+ if (sctx->b.chip_class <= VI) {
+ r = si_shader_select(ctx, &sctx->tes_shader,
+ &compiler_state);
+ if (r)
+ return false;
+ si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
+ }
} else {
/* TES as VS */
+ r = si_shader_select(ctx, &sctx->tes_shader,
+ &compiler_state);
+ if (r)
+ return false;
si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
- si_update_so(sctx, sctx->tes_shader.cso);
}
} else if (sctx->gs_shader.cso) {
- /* VS as ES */
- r = si_shader_select(ctx, &sctx->vs_shader);
- if (r)
- return false;
- si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
+ if (sctx->b.chip_class <= VI) {
+ /* VS as ES */
+ r = si_shader_select(ctx, &sctx->vs_shader,
+ &compiler_state);
+ if (r)
+ return false;
+ si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
+
+ si_pm4_bind_state(sctx, ls, NULL);
+ si_pm4_bind_state(sctx, hs, NULL);
+ }
} else {
/* VS as VS */
- r = si_shader_select(ctx, &sctx->vs_shader);
+ r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
if (r)
return false;
si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
- si_update_so(sctx, sctx->vs_shader.cso);
+ si_pm4_bind_state(sctx, ls, NULL);
+ si_pm4_bind_state(sctx, hs, NULL);
}
/* Update GS. */
if (sctx->gs_shader.cso) {
- r = si_shader_select(ctx, &sctx->gs_shader);
+ r = si_shader_select(ctx, &sctx->gs_shader, &compiler_state);
if (r)
return false;
si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
- si_pm4_bind_state(sctx, vs, sctx->gs_shader.current->gs_copy_shader->pm4);
- si_update_so(sctx, sctx->gs_shader.cso);
+ si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4);
if (!si_update_gs_ring_buffers(sctx))
return false;
-
- si_update_gsvs_ring_bindings(sctx);
} else {
si_pm4_bind_state(sctx, gs, NULL);
- si_pm4_bind_state(sctx, es, NULL);
+ if (sctx->b.chip_class <= VI)
+ si_pm4_bind_state(sctx, es, NULL);
}
si_update_vgt_shader_config(sctx);
+ if (old_clip_disable != si_get_vs_state(sctx)->key.opt.clip_disable)
+ si_mark_atom_dirty(sctx, &sctx->clip_regs);
+
if (sctx->ps_shader.cso) {
unsigned db_shader_control;
- r = si_shader_select(ctx, &sctx->ps_shader);
+ r = si_shader_select(ctx, &sctx->ps_shader, &compiler_state);
if (r)
return false;
si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);
si_mark_atom_dirty(sctx, &sctx->spi_map);
}
- if (sctx->b.family == CHIP_STONEY && si_pm4_state_changed(sctx, ps))
+ if (sctx->screen->b.rbplus_allowed &&
+ si_pm4_state_changed(sctx, ps) &&
+ (!old_ps ||
+ old_spi_shader_col_format !=
+ sctx->ps_shader.current->key.part.ps.epilog.spi_shader_col_format))
si_mark_atom_dirty(sctx, &sctx->cb_render_state);
if (sctx->ps_db_shader_control != db_shader_control) {
sctx->ps_db_shader_control = db_shader_control;
si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ if (sctx->screen->dpbb_allowed)
+ si_mark_atom_dirty(sctx, &sctx->dpbb_state);
}
- if (sctx->smoothing_enabled != sctx->ps_shader.current->key.ps.epilog.poly_line_smoothing) {
- sctx->smoothing_enabled = sctx->ps_shader.current->key.ps.epilog.poly_line_smoothing;
+ if (sctx->smoothing_enabled != sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing) {
+ sctx->smoothing_enabled = sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing;
si_mark_atom_dirty(sctx, &sctx->msaa_config);
if (sctx->b.chip_class == SI)
}
}
- if (si_pm4_state_changed(sctx, ls) ||
- si_pm4_state_changed(sctx, hs) ||
- si_pm4_state_changed(sctx, es) ||
- si_pm4_state_changed(sctx, gs) ||
- si_pm4_state_changed(sctx, vs) ||
- si_pm4_state_changed(sctx, ps)) {
+ if (si_pm4_state_enabled_and_changed(sctx, ls) ||
+ si_pm4_state_enabled_and_changed(sctx, hs) ||
+ si_pm4_state_enabled_and_changed(sctx, es) ||
+ si_pm4_state_enabled_and_changed(sctx, gs) ||
+ si_pm4_state_enabled_and_changed(sctx, vs) ||
+ si_pm4_state_enabled_and_changed(sctx, ps)) {
if (!si_update_spi_tmpring_size(sctx))
return false;
}
+ if (sctx->b.chip_class >= CIK) {
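+ /* Maintain the mask of shader stages to prefetch into L2: set a stage's
+ * bit when its pm4 state just changed, clear it when the stage is
+ * unbound; a bound-but-unchanged stage keeps its current bit. */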
+ if (si_pm4_state_enabled_and_changed(sctx, ls))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_LS;
+ else if (!sctx->queued.named.ls)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_LS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, hs))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_HS;
+ else if (!sctx->queued.named.hs)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_HS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, es))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_ES;
+ else if (!sctx->queued.named.es)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_ES;
+
+ if (si_pm4_state_enabled_and_changed(sctx, gs))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_GS;
+ else if (!sctx->queued.named.gs)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_GS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, vs))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_VS;
+ else if (!sctx->queued.named.vs)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_VS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, ps))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_PS;
+ else if (!sctx->queued.named.ps)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_PS;
+ }
+
sctx->do_update_shaders = false;
return true;
}
+static void si_emit_scratch_state(struct si_context *sctx,
+ struct r600_atom *atom)
+{
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+
+ radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
+ sctx->spi_tmpring_size);
+
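+ /* Keep the scratch buffer in the CS buffer list whenever in-flight
+ * shaders may access it. */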
+ if (sctx->scratch_buffer) {
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ sctx->scratch_buffer, RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SCRATCH_BUFFER);
+ }
+}
+
+void *si_get_blit_vs(struct si_context *sctx, enum blitter_attrib_type type,
+ unsigned num_layers)
+{
+ struct pipe_context *pipe = &sctx->b.b;
+ unsigned vs_blit_property;
+ void **vs;
+
+ switch (type) {
+ case UTIL_BLITTER_ATTRIB_NONE:
+ vs = num_layers > 1 ? &sctx->vs_blit_pos_layered :
+ &sctx->vs_blit_pos;
+ vs_blit_property = SI_VS_BLIT_SGPRS_POS;
+ break;
+ case UTIL_BLITTER_ATTRIB_COLOR:
+ vs = num_layers > 1 ? &sctx->vs_blit_color_layered :
+ &sctx->vs_blit_color;
+ vs_blit_property = SI_VS_BLIT_SGPRS_POS_COLOR;
+ break;
+ case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
+ case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
+ assert(num_layers == 1);
+ vs = &sctx->vs_blit_texcoord;
+ vs_blit_property = SI_VS_BLIT_SGPRS_POS_TEXCOORD;
+ break;
+ default:
+ assert(0);
+ return NULL;
+ }
+ if (*vs)
+ return *vs;
+
+ struct ureg_program *ureg = ureg_create(PIPE_SHADER_VERTEX);
+ if (!ureg)
+ return NULL;
+
+ /* Tell the shader to load VS inputs from SGPRs: */
+ ureg_property(ureg, TGSI_PROPERTY_VS_BLIT_SGPRS, vs_blit_property);
+ ureg_property(ureg, TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION, true);
+
+ /* This is just a pass-through shader with 1-3 MOV instructions. */
+ ureg_MOV(ureg,
+ ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0),
+ ureg_DECL_vs_input(ureg, 0));
+
+ if (type != UTIL_BLITTER_ATTRIB_NONE) {
+ ureg_MOV(ureg,
+ ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0),
+ ureg_DECL_vs_input(ureg, 1));
+ }
+
+ if (num_layers > 1) {
+ struct ureg_src instance_id =
+ ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);
+ struct ureg_dst layer =
+ ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);
+
+ ureg_MOV(ureg, ureg_writemask(layer, TGSI_WRITEMASK_X),
+ ureg_scalar(instance_id, TGSI_SWIZZLE_X));
+ }
+ ureg_END(ureg);
+
+ *vs = ureg_create_shader_and_destroy(ureg, pipe);
+ return *vs;
+}
+
void si_init_shader_functions(struct si_context *sctx)
{
si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);
+ si_init_atom(sctx, &sctx->scratch_state, &sctx->atoms.s.scratch_state,
+ si_emit_scratch_state);
sctx->b.b.create_vs_state = si_create_shader_selector;
sctx->b.b.create_tcs_state = si_create_shader_selector;