* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Christian König <christian.koenig@amd.com>
- * Marek Olšák <maraeo@gmail.com>
*/
#include "si_pipe.h"
#include "gfx9d.h"
#include "radeon/r600_cs.h"
+#include "compiler/nir/nir_serialize.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"
#include "util/hash_table.h"
#include "util/crc32.h"
+#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "ac_exp_param.h"
+#include "ac_shader_util.h"
/* SHADER_CACHE */
/**
- * Return the TGSI binary in a buffer. The first 4 bytes contain its size as
- * integer.
+ * Return the IR binary in a buffer. The first 4 bytes contain the total
+ * size of the buffer as an integer.
*/
-static void *si_get_tgsi_binary(struct si_shader_selector *sel)
+static void *si_get_ir_binary(struct si_shader_selector *sel)
{
- unsigned tgsi_size = tgsi_num_tokens(sel->tokens) *
- sizeof(struct tgsi_token);
- unsigned size = 4 + tgsi_size + sizeof(sel->so);
- char *result = (char*)MALLOC(size);
+ struct blob blob;
+ unsigned ir_size;
+ void *ir_binary;
+
+ if (sel->tokens) {
+ ir_binary = sel->tokens;
+ ir_size = tgsi_num_tokens(sel->tokens) *
+ sizeof(struct tgsi_token);
+ } else {
+ assert(sel->nir);
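+ /* Serialize the NIR into a transient blob; it is copied below and freed at the end. */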
+ blob_init(&blob);
+ nir_serialize(&blob, sel->nir);
+ ir_binary = blob.data;
+ ir_size = blob.size;
+ }
+
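+ /* Result layout: 4-byte total size, then the IR, then the streamout state. */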
+ unsigned size = 4 + ir_size + sizeof(sel->so);
+ char *result = (char*)MALLOC(size);
if (!result)
return NULL;
*((uint32_t*)result) = size;
- memcpy(result + 4, sel->tokens, tgsi_size);
- memcpy(result + 4 + tgsi_size, &sel->so, sizeof(sel->so));
+ memcpy(result + 4, ir_binary, ir_size);
+ memcpy(result + 4 + ir_size, &sel->so, sizeof(sel->so));
+
+ if (sel->nir)
+ blob_finish(&blob);
+
return result;
}
/**
* Insert a shader into the cache. It's assumed the shader is not in the cache.
* Use si_shader_cache_load_shader before calling this.
*
- * Returns false on failure, in which case the tgsi_binary should be freed.
+ * Returns false on failure, in which case the ir_binary should be freed.
*/
static bool si_shader_cache_insert_shader(struct si_screen *sscreen,
- void *tgsi_binary,
+ void *ir_binary,
struct si_shader *shader,
bool insert_into_disk_cache)
{
struct hash_entry *entry;
uint8_t key[CACHE_KEY_SIZE];
- entry = _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
+ entry = _mesa_hash_table_search(sscreen->shader_cache, ir_binary);
if (entry)
return false; /* already added */
if (!hw_binary)
return false;
- if (_mesa_hash_table_insert(sscreen->shader_cache, tgsi_binary,
+ if (_mesa_hash_table_insert(sscreen->shader_cache, ir_binary,
hw_binary) == NULL) {
FREE(hw_binary);
return false;
}
- if (sscreen->b.disk_shader_cache && insert_into_disk_cache) {
- disk_cache_compute_key(sscreen->b.disk_shader_cache, tgsi_binary,
- *((uint32_t *)tgsi_binary), key);
- disk_cache_put(sscreen->b.disk_shader_cache, key, hw_binary,
- *((uint32_t *) hw_binary));
+ if (sscreen->disk_shader_cache && insert_into_disk_cache) {
+ disk_cache_compute_key(sscreen->disk_shader_cache, ir_binary,
+ *((uint32_t *)ir_binary), key);
+ disk_cache_put(sscreen->disk_shader_cache, key, hw_binary,
+ *((uint32_t *) hw_binary), NULL);
}
return true;
}
static bool si_shader_cache_load_shader(struct si_screen *sscreen,
- void *tgsi_binary,
+ void *ir_binary,
struct si_shader *shader)
{
struct hash_entry *entry =
- _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
+ _mesa_hash_table_search(sscreen->shader_cache, ir_binary);
if (!entry) {
- if (sscreen->b.disk_shader_cache) {
+ if (sscreen->disk_shader_cache) {
unsigned char sha1[CACHE_KEY_SIZE];
- size_t tg_size = *((uint32_t *) tgsi_binary);
+ size_t ir_size = *((uint32_t *) ir_binary);
- disk_cache_compute_key(sscreen->b.disk_shader_cache,
- tgsi_binary, tg_size, sha1);
+ disk_cache_compute_key(sscreen->disk_shader_cache,
+ ir_binary, ir_size, sha1);
size_t binary_size;
uint8_t *buffer =
- disk_cache_get(sscreen->b.disk_shader_cache,
+ disk_cache_get(sscreen->disk_shader_cache,
sha1, &binary_size);
if (!buffer)
return false;
assert(!"Invalid radeonsi shader disk cache "
"item!");
- disk_cache_remove(sscreen->b.disk_shader_cache,
+ disk_cache_remove(sscreen->disk_shader_cache,
sha1);
free(buffer);
}
free(buffer);
- if (!si_shader_cache_insert_shader(sscreen, tgsi_binary,
+ if (!si_shader_cache_insert_shader(sscreen, ir_binary,
shader, false))
- FREE(tgsi_binary);
+ FREE(ir_binary);
} else {
return false;
}
} else {
if (si_load_shader_binary(shader, entry->data))
- FREE(tgsi_binary);
+ FREE(ir_binary);
else
return false;
}
- p_atomic_inc(&sscreen->b.num_shader_cache_hits);
+ p_atomic_inc(&sscreen->num_shader_cache_hits);
return true;
}
topology = V_028B6C_OUTPUT_TRIANGLE_CW;
if (sscreen->has_distributed_tess) {
- if (sscreen->b.family == CHIP_FIJI ||
- sscreen->b.family >= CHIP_POLARIS10)
+ if (sscreen->info.family == CHIP_FIJI ||
+ sscreen->info.family >= CHIP_POLARIS10)
distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
else
distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
{
unsigned type = sel->type;
- if (sscreen->b.family < CHIP_POLARIS10)
+ if (sscreen->info.family < CHIP_POLARIS10)
return;
/* VS as VS, or VS as ES: */
return shader->pm4;
}
+static unsigned si_get_num_vs_user_sgprs(unsigned num_always_on_user_sgprs)
+{
+ /* Add the pointer to VBO descriptors. */
+ if (HAVE_32BIT_POINTERS) {
+ return num_always_on_user_sgprs + 1;
+ } else {
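+ /* A 64-bit pointer occupies two SGPRs and must be aligned to an SGPR pair. */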
+ assert(num_always_on_user_sgprs % 2 == 0);
+ return num_always_on_user_sgprs + 2;
+ }
+}
+
static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
{
struct si_pm4_state *pm4;
unsigned vgpr_comp_cnt;
uint64_t va;
- assert(sscreen->b.chip_class <= VI);
+ assert(sscreen->info.chip_class <= VI);
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
return;
vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;
si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
- si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);
+ si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, S_00B524_MEM_BASE(va >> 40));
shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
S_00B528_DX10_CLAMP(1) |
S_00B528_FLOAT_MODE(shader->config.float_mode);
- shader->config.rsrc2 = S_00B52C_USER_SGPR(SI_VS_NUM_USER_SGPR) |
+ shader->config.rsrc2 = S_00B52C_USER_SGPR(si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR)) |
S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
}
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
- if (sscreen->b.chip_class >= GFX9) {
+ if (sscreen->info.chip_class >= GFX9) {
si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
- si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, va >> 40);
+ si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, S_00B414_MEM_BASE(va >> 40));
/* We need at least 2 components for LS.
* VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
*/
ls_vgpr_comp_cnt = shader->info.uses_instanceid ? 2 : 1;
+ unsigned num_user_sgprs =
+ si_get_num_vs_user_sgprs(GFX9_TCS_NUM_USER_SGPR);
+
shader->config.rsrc2 =
- S_00B42C_USER_SGPR(GFX9_TCS_NUM_USER_SGPR) |
- S_00B42C_USER_SGPR_MSB(GFX9_TCS_NUM_USER_SGPR >> 5) |
+ S_00B42C_USER_SGPR(num_user_sgprs) |
+ S_00B42C_USER_SGPR_MSB(num_user_sgprs >> 5) |
S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
} else {
si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
- si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
+ si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, S_00B424_MEM_BASE(va >> 40));
shader->config.rsrc2 =
S_00B42C_USER_SGPR(GFX6_TCS_NUM_USER_SGPR) |
S_00B428_FLOAT_MODE(shader->config.float_mode) |
S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt));
- if (sscreen->b.chip_class <= VI) {
+ if (sscreen->info.chip_class <= VI) {
si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
shader->config.rsrc2);
}
uint64_t va;
unsigned oc_lds_en;
- assert(sscreen->b.chip_class <= VI);
+ assert(sscreen->info.chip_class <= VI);
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
return;
if (shader->selector->type == PIPE_SHADER_VERTEX) {
/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
vgpr_comp_cnt = shader->info.uses_instanceid ? 1 : 0;
- num_user_sgprs = SI_VS_NUM_USER_SGPR;
+ num_user_sgprs = si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR);
} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
vgpr_comp_cnt = shader->selector->info.uses_primid ? 3 : 2;
num_user_sgprs = SI_TES_NUM_USER_SGPR;
si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
shader->selector->esgs_itemsize / 4);
si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
- si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
+ si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, S_00B324_MEM_BASE(va >> 40));
si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
S_00B328_VGPRS((shader->config.num_vgprs - 1) / 4) |
S_00B328_SGPRS((shader->config.num_sgprs - 1) / 8) |
polaris_set_vgt_vertex_reuse(sscreen, shader->selector, shader, pm4);
}
-/**
- * Calculate the appropriate setting of VGT_GS_MODE when \p shader is a
- * geometry shader.
- */
-static uint32_t si_vgt_gs_mode(struct si_shader_selector *sel)
-{
- enum chip_class chip_class = sel->screen->b.chip_class;
- unsigned gs_max_vert_out = sel->gs_max_out_vertices;
- unsigned cut_mode;
-
- if (gs_max_vert_out <= 128) {
- cut_mode = V_028A40_GS_CUT_128;
- } else if (gs_max_vert_out <= 256) {
- cut_mode = V_028A40_GS_CUT_256;
- } else if (gs_max_vert_out <= 512) {
- cut_mode = V_028A40_GS_CUT_512;
- } else {
- assert(gs_max_vert_out <= 1024);
- cut_mode = V_028A40_GS_CUT_1024;
- }
-
- return S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
- S_028A40_CUT_MODE(cut_mode)|
- S_028A40_ES_WRITE_OPTIMIZE(chip_class <= VI) |
- S_028A40_GS_WRITE_OPTIMIZE(1) |
- S_028A40_ONCHIP(chip_class >= GFX9 ? 1 : 0);
-}
-
struct gfx9_gs_info {
unsigned es_verts_per_subgroup;
unsigned gs_prims_per_subgroup;
/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
* Make sure we don't go over the maximum value.
*/
- max_gs_prims = MIN2(max_gs_prims,
- max_out_prims /
- (gs->gs_max_out_vertices * gs_num_invocations));
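+ /* Guard against division by zero if the GS declares 0 output vertices. */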
+ if (gs->gs_max_out_vertices > 0) {
+ max_gs_prims = MIN2(max_gs_prims,
+ max_out_prims /
+ (gs->gs_max_out_vertices * gs_num_invocations));
+ }
assert(max_gs_prims > 0);
/* If the primitive has adjacency, halve the number of vertices
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
- if (sscreen->b.chip_class >= GFX9) {
+ if (sscreen->info.chip_class >= GFX9) {
unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
unsigned es_type = shader->key.part.gs.es->type;
unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
else
gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
+ unsigned num_user_sgprs;
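+ /* Merged VS-GS also needs the VS user SGPRs (including the VBO descriptor pointer); TES-GS has a fixed count. */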
+ if (es_type == PIPE_SHADER_VERTEX)
+ num_user_sgprs = si_get_num_vs_user_sgprs(GFX9_VSGS_NUM_USER_SGPR);
+ else
+ num_user_sgprs = GFX9_TESGS_NUM_USER_SGPR;
+
gfx9_get_gs_info(shader->key.part.gs.es, sel, &gs_info);
si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
- si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, va >> 40);
+ si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES, S_00B214_MEM_BASE(va >> 40));
si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
S_00B228_FLOAT_MODE(shader->config.float_mode) |
S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
- S_00B22C_USER_SGPR(GFX9_GS_NUM_USER_SGPR) |
- S_00B22C_USER_SGPR_MSB(GFX9_GS_NUM_USER_SGPR >> 5) |
+ S_00B22C_USER_SGPR(num_user_sgprs) |
+ S_00B22C_USER_SGPR_MSB(num_user_sgprs >> 5) |
S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
S_00B22C_OC_LDS_EN(es_type == PIPE_SHADER_TESS_EVAL) |
S_00B22C_LDS_SIZE(gs_info.lds_size) |
NULL, pm4);
} else {
si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
- si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);
+ si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, S_00B224_MEM_BASE(va >> 40));
si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
struct si_shader_selector *gs)
{
+ const struct tgsi_shader_info *info = &shader->selector->info;
struct si_pm4_state *pm4;
unsigned num_user_sgprs;
unsigned nparams, vgpr_comp_cnt;
uint64_t va;
unsigned oc_lds_en;
unsigned window_space =
- shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
- bool enable_prim_id = shader->key.mono.u.vs_export_prim_id || shader->selector->info.uses_primid;
+ info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
+ bool enable_prim_id = shader->key.mono.u.vs_export_prim_id || info->uses_primid;
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
return;
* not sent again.
*/
if (!gs) {
- unsigned mode = 0;
+ unsigned mode = V_028A40_GS_OFF;
- /* PrimID needs GS scenario A.
- * GFX9 also needs it when ViewportIndex is enabled.
- */
- if (enable_prim_id ||
- (sscreen->b.chip_class >= GFX9 &&
- shader->selector->info.writes_viewport_index))
+ /* PrimID needs GS scenario A. */
+ if (enable_prim_id)
mode = V_028A40_GS_SCENARIO_A;
si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, S_028A40_MODE(mode));
si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
} else {
- si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(gs));
+ si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
+ ac_vgt_gs_mode(gs->gs_max_out_vertices,
+ sscreen->info.chip_class));
si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
}
+ if (sscreen->info.chip_class <= VI) {
+ /* Reuse needs to be set off if we write oViewport. */
+ si_pm4_set_reg(pm4, R_028AB4_VGT_REUSE_OFF,
+ S_028AB4_REUSE_OFF(info->writes_viewport_index));
+ }
+
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
* StepRate0 is set to 1, so that VGPR3 doesn't have to be loaded.
*/
vgpr_comp_cnt = enable_prim_id ? 2 : (shader->info.uses_instanceid ? 1 : 0);
- num_user_sgprs = SI_VS_NUM_USER_SGPR;
+
+ if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
+ num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
+ info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
+ } else {
+ num_user_sgprs = si_get_num_vs_user_sgprs(SI_VS_NUM_USER_SGPR);
+ }
} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
vgpr_comp_cnt = enable_prim_id ? 3 : 2;
num_user_sgprs = SI_TES_NUM_USER_SGPR;
oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;
si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
- si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
+ si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, S_00B124_MEM_BASE(va >> 40));
si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) |
S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8) |
return value;
}
-static unsigned si_get_cb_shader_mask(unsigned spi_shader_col_format)
-{
- unsigned i, cb_shader_mask = 0;
-
- for (i = 0; i < 8; i++) {
- switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
- case V_028714_SPI_SHADER_ZERO:
- break;
- case V_028714_SPI_SHADER_32_R:
- cb_shader_mask |= 0x1 << (i * 4);
- break;
- case V_028714_SPI_SHADER_32_GR:
- cb_shader_mask |= 0x3 << (i * 4);
- break;
- case V_028714_SPI_SHADER_32_AR:
- cb_shader_mask |= 0x9 << (i * 4);
- break;
- case V_028714_SPI_SHADER_FP16_ABGR:
- case V_028714_SPI_SHADER_UNORM16_ABGR:
- case V_028714_SPI_SHADER_SNORM16_ABGR:
- case V_028714_SPI_SHADER_UINT16_ABGR:
- case V_028714_SPI_SHADER_SINT16_ABGR:
- case V_028714_SPI_SHADER_32_ABGR:
- cb_shader_mask |= 0xf << (i * 4);
- break;
- default:
- assert(0);
- }
- }
- return cb_shader_mask;
-}
-
static void si_shader_ps(struct si_shader *shader)
{
struct tgsi_shader_info *info = &shader->selector->info;
spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
spi_shader_col_format = si_get_spi_shader_col_format(shader);
- cb_shader_mask = si_get_cb_shader_mask(spi_shader_col_format);
+ cb_shader_mask = ac_get_cb_shader_mask(spi_shader_col_format);
/* Ensure that some export memory is always allocated, for two reasons:
*
si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT,
- si_get_spi_shader_z_format(info->writes_z,
+ ac_get_spi_shader_z_format(info->writes_z,
info->writes_stencil,
info->writes_samplemask));
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
- si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);
+ si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, S_00B024_MEM_BASE(va >> 40));
si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) |
if (!sctx->vertex_elements)
return;
+ prolog_key->instance_divisor_is_one =
+ sctx->vertex_elements->instance_divisor_is_one;
+ prolog_key->instance_divisor_is_fetched =
+ sctx->vertex_elements->instance_divisor_is_fetched;
+
+ /* Prefer a monolithic shader to allow scheduling divisions around
+ * VBO loads. */
+ if (prolog_key->instance_divisor_is_fetched)
+ key->opt.prefer_mono = 1;
+
unsigned count = MIN2(vs->info.num_inputs,
sctx->vertex_elements->count);
- for (unsigned i = 0; i < count; ++i) {
- prolog_key->instance_divisors[i] =
- sctx->vertex_elements->elements[i].instance_divisor;
- }
-
memcpy(key->mono.vs_fix_fetch, sctx->vertex_elements->fix_fetch, count);
}
{
struct si_shader_selector *ps = sctx->ps_shader.cso;
- key->opt.hw_vs.clip_disable =
+ key->opt.clip_disable =
sctx->queued.named.rasterizer->clip_plane_enable == 0 &&
(vs->info.clipdist_writemask ||
vs->info.writes_clipvertex) &&
/* Find out if PS is disabled. */
bool ps_disabled = true;
if (ps) {
+ const struct si_state_blend *blend = sctx->queued.named.blend;
+ bool alpha_to_coverage = blend && blend->alpha_to_coverage;
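+ /* Alpha-to-coverage makes the PS affect the sample mask, so it can't be treated as disabled. */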
bool ps_modifies_zs = ps->info.uses_kill ||
ps->info.writes_z ||
ps->info.writes_stencil ||
ps->info.writes_samplemask ||
+ alpha_to_coverage ||
si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS;
unsigned ps_colormask = sctx->framebuffer.colorbuf_enabled_4bit &
uint64_t linked = outputs_written & inputs_read;
- key->opt.hw_vs.kill_outputs = ~linked & outputs_written;
+ key->opt.kill_outputs = ~linked & outputs_written;
}
/* Compute the key for the hw shader variant */
si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
key, &key->part.tcs.ls_prolog);
key->part.tcs.ls = sctx->vs_shader.cso;
+
+ /* When the LS VGPR fix is needed, monolithic shaders
+ * can:
+ * - avoid initializing EXEC in both the LS prolog
+ * and the LS main part when !vs_needs_prolog
+ * - remove the fixup for unused input VGPRs
+ */
+ key->part.tcs.ls_prolog.ls_vgpr_fix = sctx->ls_vgpr_fix;
+
+ /* The LS output / HS input layout can be communicated
+ * directly instead of via user SGPRs for merged LS-HS.
+ * The LS VGPR fix prefers this too.
+ */
+ key->opt.prefer_mono = 1;
}
key->part.tcs.epilog.prim_mode =
sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
+ key->part.tcs.epilog.invoc0_tess_factors_are_def =
+ sel->tcs_info.tessfactors_are_def_in_all_invocs;
key->part.tcs.epilog.tes_reads_tess_factors =
sctx->tes_shader.cso->info.reads_tess_factors;
si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
key, &key->part.gs.vs_prolog);
key->part.gs.es = sctx->vs_shader.cso;
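+ /* On GFX9, tell the GS prolog that the merged previous stage is the VS. */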
+ key->part.gs.prolog.gfx9_prev_is_vs = 1;
}
/* Merged ES-GS can have unbalanced wave usage.
sctx->framebuffer.spi_shader_col_format_alpha) |
(~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
sctx->framebuffer.spi_shader_col_format);
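+ /* Mask out color exports to disabled color buffers. */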
+ key->part.ps.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit;
/* The output for dual source blending should have
* the same format as the first output.
sctx->framebuffer.nr_samples <= 1;
key->part.ps.epilog.clamp_color = rs->clamp_fragment_color;
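+ /* Pass log2(ps_iter_samples) to the prolog so it can adjust gl_SampleMaskIn for per-sample shading. */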
+ if (sctx->ps_iter_samples > 1 &&
+ sel->info.reads_samplemask) {
+ key->part.ps.prolog.samplemask_log_ps_iter =
+ util_logbase2(util_next_power_of_two(sctx->ps_iter_samples));
+ }
+
if (rs->force_persample_interp &&
rs->multisample_enable &&
sctx->framebuffer.nr_samples > 1 &&
sel->info.uses_linear_center +
sel->info.uses_linear_centroid +
sel->info.uses_linear_sample > 1;
+
+ if (sel->info.opcode_count[TGSI_OPCODE_INTERP_SAMPLE])
+ key->mono.u.ps.interpolate_at_sample_force_center = 1;
}
}
key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
+
+ /* ps_uses_fbfetch is true only if the color buffer is bound. */
+ if (sctx->ps_uses_fbfetch) {
+ struct pipe_surface *cb0 = sctx->framebuffer.state.cbufs[0];
+ struct pipe_resource *tex = cb0->texture;
+
+ /* 1D textures are allocated and used as 2D on GFX9. */
+ key->mono.u.ps.fbfetch_msaa = sctx->framebuffer.nr_samples > 1;
+ key->mono.u.ps.fbfetch_is_1D = sctx->b.chip_class != GFX9 &&
+ (tex->target == PIPE_TEXTURE_1D ||
+ tex->target == PIPE_TEXTURE_1D_ARRAY);
+ key->mono.u.ps.fbfetch_layered = tex->target == PIPE_TEXTURE_1D_ARRAY ||
+ tex->target == PIPE_TEXTURE_2D_ARRAY ||
+ tex->target == PIPE_TEXTURE_CUBE ||
+ tex->target == PIPE_TEXTURE_CUBE_ARRAY ||
+ tex->target == PIPE_TEXTURE_3D;
+ }
break;
}
default:
assert(0);
}
- if (unlikely(sctx->screen->b.debug_flags & DBG_NO_OPT_VARIANT))
+ if (unlikely(sctx->screen->debug_flags & DBG(NO_OPT_VARIANT)))
memset(&key->opt, 0, sizeof(key->opt));
}
-static void si_build_shader_variant(void *job, int thread_index)
+static void si_build_shader_variant(struct si_shader *shader,
+ int thread_index,
+ bool low_priority)
{
- struct si_shader *shader = (struct si_shader *)job;
struct si_shader_selector *sel = shader->selector;
struct si_screen *sscreen = sel->screen;
LLVMTargetMachineRef tm;
int r;
if (thread_index >= 0) {
- assert(thread_index < ARRAY_SIZE(sscreen->tm_low_priority));
- tm = sscreen->tm_low_priority[thread_index];
+ if (low_priority) {
+ assert(thread_index < ARRAY_SIZE(sscreen->tm_low_priority));
+ tm = sscreen->tm_low_priority[thread_index];
+ } else {
+ assert(thread_index < ARRAY_SIZE(sscreen->tm));
+ tm = sscreen->tm[thread_index];
+ }
if (!debug->async)
debug = NULL;
} else {
+ assert(!low_priority);
tm = shader->compiler_ctx_state.tm;
}
si_shader_init_pm4_state(sscreen, shader);
}
+static void si_build_shader_variant_low_priority(void *job, int thread_index)
+{
+ struct si_shader *shader = (struct si_shader *)job;
+
+ assert(thread_index >= 0);
+
+ si_build_shader_variant(shader, thread_index, true);
+}
+
static const struct si_shader_key zeroed;
static bool si_check_missing_main_part(struct si_screen *sscreen,
if (!main_part)
return false;
+ /* We can leave the fence as permanently signaled because the
+ * main part becomes visible globally only after it has been
+ * compiled. */
+ util_queue_fence_init(&main_part->ready);
+
main_part->selector = sel;
main_part->key.as_es = key->as_es;
main_part->key.as_ls = key->as_ls;
return true;
}
-static void si_destroy_shader_selector(struct si_context *sctx,
- struct si_shader_selector *sel);
-
-static void si_shader_selector_reference(struct si_context *sctx,
- struct si_shader_selector **dst,
- struct si_shader_selector *src)
-{
- if (pipe_reference(&(*dst)->reference, &src->reference))
- si_destroy_shader_selector(sctx, *dst);
-
- *dst = src;
-}
-
/* Select the hw shader variant depending on the current state. */
static int si_shader_select_with_key(struct si_screen *sscreen,
struct si_shader_ctx_state *state,
* variants, it will cost just a computation of the key and this
* test. */
if (likely(current &&
- memcmp(¤t->key, key, sizeof(*key)) == 0 &&
- (!current->is_optimized ||
- util_queue_fence_is_signalled(¤t->optimized_ready))))
+ memcmp(¤t->key, key, sizeof(*key)) == 0)) {
+ if (unlikely(!util_queue_fence_is_signalled(¤t->ready))) {
+ if (current->is_optimized) {
+ memset(&key->opt, 0, sizeof(key->opt));
+ goto current_not_ready;
+ }
+
+ util_queue_fence_wait(¤t->ready);
+ }
+
return current->compilation_failed ? -1 : 0;
+ }
+current_not_ready:
/* This must be done before the mutex is locked, because async GS
* compilation calls this function too, and therefore must enter
/* Don't check the "current" shader. We checked it above. */
if (current != iter &&
memcmp(&iter->key, key, sizeof(*key)) == 0) {
- /* If it's an optimized shader and its compilation has
- * been started but isn't done, use the unoptimized
- * shader so as not to cause a stall due to compilation.
- */
- if (iter->is_optimized &&
- !util_queue_fence_is_signalled(&iter->optimized_ready)) {
- memset(&key->opt, 0, sizeof(key->opt));
- mtx_unlock(&sel->mutex);
- goto again;
+ mtx_unlock(&sel->mutex);
+
+ if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
+ /* If it's an optimized shader and its compilation has
+ * been started but isn't done, use the unoptimized
+ * shader so as not to cause a stall due to compilation.
+ */
+ if (iter->is_optimized) {
+ memset(&key->opt, 0, sizeof(key->opt));
+ goto again;
+ }
+
+ util_queue_fence_wait(&iter->ready);
}
if (iter->compilation_failed) {
- mtx_unlock(&sel->mutex);
return -1; /* skip the draw call */
}
state->current = iter;
- mtx_unlock(&sel->mutex);
return 0;
}
}
mtx_unlock(&sel->mutex);
return -ENOMEM;
}
+
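+ /* The fence is initialized signaled; it is reset before the variant becomes visible to other threads. */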
+ util_queue_fence_init(&shader->ready);
+
shader->selector = sel;
shader->key = *key;
shader->compiler_ctx_state = *compiler_state;
/* If this is a merged shader, get the first shader's selector. */
- if (sscreen->b.chip_class >= GFX9) {
+ if (sscreen->info.chip_class >= GFX9) {
if (sel->type == PIPE_SHADER_TESS_CTRL)
previous_stage_sel = key->part.tcs.ls;
else if (sel->type == PIPE_SHADER_GEOMETRY)
shader->is_optimized =
!is_pure_monolithic &&
memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
- if (shader->is_optimized)
- util_queue_fence_init(&shader->optimized_ready);
-
- if (!sel->last_variant) {
- sel->first_variant = shader;
- sel->last_variant = shader;
- } else {
- sel->last_variant->next_variant = shader;
- sel->last_variant = shader;
- }
/* If it's an optimized shader, compile it asynchronously. */
if (shader->is_optimized &&
thread_index < 0) {
/* Compile it asynchronously. */
util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
- shader, &shader->optimized_ready,
- si_build_shader_variant, NULL);
+ shader, &shader->ready,
+ si_build_shader_variant_low_priority, NULL);
+
+ /* Add only after the ready fence was reset, to guard against a
+ * race with si_bind_XX_shader. */
+ if (!sel->last_variant) {
+ sel->first_variant = shader;
+ sel->last_variant = shader;
+ } else {
+ sel->last_variant->next_variant = shader;
+ sel->last_variant = shader;
+ }
/* Use the default (unoptimized) shader for now. */
memset(&key->opt, 0, sizeof(key->opt));
mtx_unlock(&sel->mutex);
goto again;
}
+ /* Reset the fence before adding to the variant list. */
+ util_queue_fence_reset(&shader->ready);
+
+ if (!sel->last_variant) {
+ sel->first_variant = shader;
+ sel->last_variant = shader;
+ } else {
+ sel->last_variant->next_variant = shader;
+ sel->last_variant = shader;
+ }
+
+ mtx_unlock(&sel->mutex);
+
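+ /* Compile outside the selector mutex; concurrent users wait on shader->ready instead. */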
assert(!shader->is_optimized);
- si_build_shader_variant(shader, thread_index);
+ si_build_shader_variant(shader, thread_index, false);
+
+ util_queue_fence_signal(&shader->ready);
if (!shader->compilation_failed)
state->current = shader;
- mtx_unlock(&sel->mutex);
return shader->compilation_failed ? -1 : 0;
}
}
static void si_parse_next_shader_property(const struct tgsi_shader_info *info,
+ bool streamout,
struct si_shader_key *key)
{
unsigned next_shader = info->properties[TGSI_PROPERTY_NEXT_SHADER];
key->as_ls = 1;
break;
default:
- /* If POSITION isn't written, it can't be a HW VS.
- * Assume that it's a HW LS. (the next shader is TCS)
+ /* If POSITION isn't written, it can only be a HW VS
+ * if streamout is used. If streamout isn't used,
+ * assume that it's a HW LS. (the next shader is TCS)
* This heuristic is needed for separate shader objects.
*/
- if (!info->writes_position)
+ if (!info->writes_position && !streamout)
key->as_ls = 1;
}
break;
* si_shader_selector initialization. Since it can be done asynchronously,
* there is no way to report compile failures to applications.
*/
-void si_init_shader_selector_async(void *job, int thread_index)
+static void si_init_shader_selector_async(void *job, int thread_index)
{
struct si_shader_selector *sel = (struct si_shader_selector *)job;
struct si_screen *sscreen = sel->screen;
LLVMTargetMachineRef tm;
struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
- unsigned i;
- if (thread_index >= 0) {
- assert(thread_index < ARRAY_SIZE(sscreen->tm));
- tm = sscreen->tm[thread_index];
- if (!debug->async)
- debug = NULL;
- } else {
- tm = sel->compiler_ctx_state.tm;
- }
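+ /* This always runs on a compiler-queue thread now, so the debug callback must be async-capable. */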
+ assert(!debug->debug_message || debug->async);
+ assert(thread_index >= 0);
+ assert(thread_index < ARRAY_SIZE(sscreen->tm));
+ tm = sscreen->tm[thread_index];
/* Compile the main shader part for use with a prolog and/or epilog.
* If this fails, the driver will try to compile a monolithic shader
* on demand.
*/
if (!sscreen->use_monolithic_shaders) {
struct si_shader *shader = CALLOC_STRUCT(si_shader);
- void *tgsi_binary;
+ void *ir_binary = NULL;
if (!shader) {
fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
return;
}
+ /* We can leave the fence signaled because use of the default
+ * main part is guarded by the selector's ready fence. */
+ util_queue_fence_init(&shader->ready);
+
shader->selector = sel;
- si_parse_next_shader_property(&sel->info, &shader->key);
+ si_parse_next_shader_property(&sel->info,
+ sel->so.num_outputs != 0,
+ &shader->key);
- tgsi_binary = si_get_tgsi_binary(sel);
+ if (sel->tokens || sel->nir)
+ ir_binary = si_get_ir_binary(sel);
/* Try to load the shader from the shader cache. */
mtx_lock(&sscreen->shader_cache_mutex);
- if (tgsi_binary &&
- si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
+ if (ir_binary &&
+ si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
mtx_unlock(&sscreen->shader_cache_mutex);
+ si_shader_dump_stats_for_shader_db(shader, debug);
} else {
mtx_unlock(&sscreen->shader_cache_mutex);
if (si_compile_tgsi_shader(sscreen, tm, shader, false,
debug) != 0) {
FREE(shader);
- FREE(tgsi_binary);
+ FREE(ir_binary);
fprintf(stderr, "radeonsi: can't compile a main shader part\n");
return;
}
- if (tgsi_binary) {
+ if (ir_binary) {
mtx_lock(&sscreen->shader_cache_mutex);
- if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader, true))
- FREE(tgsi_binary);
+ if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true))
+ FREE(ir_binary);
mtx_unlock(&sscreen->shader_cache_mutex);
}
}
}
}
- /* Pre-compilation. */
- if (sscreen->b.debug_flags & DBG_PRECOMPILE) {
- struct si_shader_ctx_state state = {sel};
- struct si_shader_key key;
-
- memset(&key, 0, sizeof(key));
- si_parse_next_shader_property(&sel->info, &key);
-
- /* Set reasonable defaults, so that the shader key doesn't
- * cause any code to be eliminated.
- */
- switch (sel->type) {
- case PIPE_SHADER_TESS_CTRL:
- key.part.tcs.epilog.prim_mode = PIPE_PRIM_TRIANGLES;
- break;
- case PIPE_SHADER_FRAGMENT:
- key.part.ps.prolog.bc_optimize_for_persp =
- sel->info.uses_persp_center &&
- sel->info.uses_persp_centroid;
- key.part.ps.prolog.bc_optimize_for_linear =
- sel->info.uses_linear_center &&
- sel->info.uses_linear_centroid;
- key.part.ps.epilog.alpha_func = PIPE_FUNC_ALWAYS;
- for (i = 0; i < 8; i++)
- if (sel->info.colors_written & (1 << i))
- key.part.ps.epilog.spi_shader_col_format |=
- V_028710_SPI_SHADER_FP16_ABGR << (i * 4);
- break;
- }
-
- if (si_shader_select_with_key(sscreen, &state,
- &sel->compiler_ctx_state, &key,
- thread_index))
- fprintf(stderr, "radeonsi: can't create a monolithic shader\n");
- }
-
/* The GS copy shader is always pre-compiled. */
if (sel->type == PIPE_SHADER_GEOMETRY) {
sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, tm, sel, debug);
pipe_reference_init(&sel->reference, 1);
sel->screen = sscreen;
- sel->compiler_ctx_state.tm = sctx->tm;
- sel->compiler_ctx_state.debug = sctx->b.debug;
+ sel->compiler_ctx_state.debug = sctx->debug;
sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
- sel->tokens = tgsi_dup_tokens(state->tokens);
- if (!sel->tokens) {
- FREE(sel);
- return NULL;
- }
sel->so = state->stream_output;
- tgsi_scan_shader(state->tokens, &sel->info);
+
+ if (state->type == PIPE_SHADER_IR_TGSI) {
+ sel->tokens = tgsi_dup_tokens(state->tokens);
+ if (!sel->tokens) {
+ FREE(sel);
+ return NULL;
+ }
+
+ tgsi_scan_shader(state->tokens, &sel->info);
+ tgsi_scan_tess_ctrl(state->tokens, &sel->info, &sel->tcs_info);
+ } else {
+ assert(state->type == PIPE_SHADER_IR_NIR);
+
+ sel->nir = state->ir.nir;
+
+ si_nir_scan_shader(sel->nir, &sel->info);
+ si_nir_scan_tess_ctrl(sel->nir, &sel->info, &sel->tcs_info);
+
+ si_lower_nir(sel);
+ }
+
sel->type = sel->info.processor;
- p_atomic_inc(&sscreen->b.num_shaders_created);
+ p_atomic_inc(&sscreen->num_shaders_created);
si_get_active_slot_masks(&sel->info,
&sel->active_const_and_shader_buffers,
&sel->active_samplers_and_images);
/* The prolog is a no-op if there are no inputs. */
sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX &&
- sel->info.num_inputs;
+ sel->info.num_inputs &&
+ !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
+
+ sel->force_correct_derivs_after_kill =
+ sel->type == PIPE_SHADER_FRAGMENT &&
+ sel->info.uses_derivatives &&
+ sel->info.uses_kill &&
+ sctx->screen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL);
/* Set which opcode uses which (i,j) pair. */
if (sel->info.uses_persp_opcode_interp_centroid)
case PIPE_SHADER_TESS_CTRL:
/* Always reserve space for these. */
sel->patch_outputs_written |=
- (1llu << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0)) |
- (1llu << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0));
+ (1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0)) |
+ (1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0));
/* fall through */
case PIPE_SHADER_VERTEX:
case PIPE_SHADER_TESS_EVAL:
case TGSI_SEMANTIC_TESSOUTER:
case TGSI_SEMANTIC_PATCH:
sel->patch_outputs_written |=
- 1llu << si_shader_io_get_unique_index_patch(name, index);
+ 1ull << si_shader_io_get_unique_index_patch(name, index);
break;
case TGSI_SEMANTIC_GENERIC:
/* fall through */
default:
sel->outputs_written |=
- 1llu << si_shader_io_get_unique_index(name, index);
+ 1ull << si_shader_io_get_unique_index(name, index);
break;
case TGSI_SEMANTIC_CLIPVERTEX: /* ignore these */
case TGSI_SEMANTIC_EDGEFLAG:
/* fall through */
default:
sel->inputs_read |=
- 1llu << si_shader_io_get_unique_index(name, index);
+ 1ull << si_shader_io_get_unique_index(name, index);
break;
case TGSI_SEMANTIC_PCOORD: /* ignore this */
break;
(void) mtx_init(&sel->mutex, mtx_plain);
util_queue_fence_init(&sel->ready);
- if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
- sctx->is_debug ||
- r600_can_dump_shader(&sscreen->b, sel->info.processor))
- si_init_shader_selector_async(sel, -1);
- else
- util_queue_add_job(&sscreen->shader_compiler_queue, sel,
- &sel->ready, si_init_shader_selector_async,
- NULL);
+ struct util_async_debug_callback async_debug;
+ bool wait =
+ (sctx->debug.debug_message && !sctx->debug.async) ||
+ sctx->is_debug ||
+ si_can_dump_shader(sscreen, sel->info.processor);
+
+ if (wait) {
+ u_async_debug_init(&async_debug);
+ sel->compiler_ctx_state.debug = async_debug.base;
+ }
+
+ util_queue_add_job(&sscreen->shader_compiler_queue, sel,
+ &sel->ready, si_init_shader_selector_async,
+ NULL);
+
+ if (wait) {
+ util_queue_fence_wait(&sel->ready);
+ u_async_debug_drain(&async_debug, &sctx->debug);
+ u_async_debug_cleanup(&async_debug);
+ }
return sel;
}
if (!shader_with_so)
return;
- sctx->b.streamout.enabled_stream_buffers_mask =
+ sctx->streamout.enabled_stream_buffers_mask =
shader_with_so->enabled_streamout_buffer_mask;
- sctx->b.streamout.stride_in_dw = shader_with_so->so.stride;
+ sctx->streamout.stride_in_dw = shader_with_so->so.stride;
}
static void si_update_clip_regs(struct si_context *sctx,
old_hw_vs->culldist_mask != next_hw_vs->culldist_mask ||
!old_hw_vs_variant ||
!next_hw_vs_variant ||
- old_hw_vs_variant->key.opt.hw_vs.clip_disable !=
- next_hw_vs_variant->key.opt.hw_vs.clip_disable))
+ old_hw_vs_variant->key.opt.clip_disable !=
+ next_hw_vs_variant->key.opt.clip_disable))
si_mark_atom_dirty(sctx, &sctx->clip_regs);
}
+static void si_update_common_shader_state(struct si_context *sctx)
+{
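+ /* Recompute per-context state that depends on all bound shader stages. */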
+ sctx->uses_bindless_samplers =
+ si_shader_uses_bindless_samplers(sctx->vs_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->gs_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->ps_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->tcs_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->tes_shader.cso);
+ sctx->uses_bindless_images =
+ si_shader_uses_bindless_images(sctx->vs_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->gs_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->ps_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->tcs_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->tes_shader.cso);
+ sctx->do_update_shaders = true;
+}
+
static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
sctx->vs_shader.cso = sel;
sctx->vs_shader.current = sel ? sel->first_variant : NULL;
- sctx->do_update_shaders = true;
- r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
+ sctx->num_vs_blit_sgprs = sel ? sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS] : 0;
+
+ si_update_common_shader_state(sctx);
+ si_update_vs_viewport_state(sctx);
si_set_active_descriptors_for_shader(sctx, sel);
si_update_streamout_state(sctx);
si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
sctx->gs_shader.cso = sel;
sctx->gs_shader.current = sel ? sel->first_variant : NULL;
sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL;
- sctx->do_update_shaders = true;
+
+ si_update_common_shader_state(sctx);
sctx->last_rast_prim = -1; /* reset this so that it gets updated */
if (enable_changed) {
if (sctx->ia_multi_vgt_param_key.u.uses_tess)
si_update_tess_uses_prim_id(sctx);
}
- r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
+ si_update_vs_viewport_state(sctx);
si_set_active_descriptors_for_shader(sctx, sel);
si_update_streamout_state(sctx);
si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
sctx->tcs_shader.cso = sel;
sctx->tcs_shader.current = sel ? sel->first_variant : NULL;
si_update_tess_uses_prim_id(sctx);
- sctx->do_update_shaders = true;
+
+ si_update_common_shader_state(sctx);
if (enable_changed)
sctx->last_tcs = NULL; /* invalidate derived tess state */
sctx->tes_shader.current = sel ? sel->first_variant : NULL;
sctx->ia_multi_vgt_param_key.u.uses_tess = sel != NULL;
si_update_tess_uses_prim_id(sctx);
- sctx->do_update_shaders = true;
+
+ si_update_common_shader_state(sctx);
sctx->last_rast_prim = -1; /* reset this so that it gets updated */
if (enable_changed) {
si_shader_change_notify(sctx);
sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
}
- r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
+ si_update_vs_viewport_state(sctx);
si_set_active_descriptors_for_shader(sctx, sel);
si_update_streamout_state(sctx);
si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
sctx->ps_shader.cso = sel;
sctx->ps_shader.current = sel ? sel->first_variant : NULL;
- sctx->do_update_shaders = true;
+ si_update_common_shader_state(sctx);
if (sel) {
if (sctx->ia_multi_vgt_param_key.u.uses_tess)
si_update_tess_uses_prim_id(sctx);
if (!old_sel ||
old_sel->info.colors_written != sel->info.colors_written)
si_mark_atom_dirty(sctx, &sctx->cb_render_state);
+
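+ /* Out-of-order rasterization depends on whether the PS writes memory or forces early Z/S. */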
+ if (sctx->screen->has_out_of_order_rast &&
+ (!old_sel ||
+ old_sel->info.writes_memory != sel->info.writes_memory ||
+ old_sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] !=
+ sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]))
+ si_mark_atom_dirty(sctx, &sctx->msaa_config);
}
si_set_active_descriptors_for_shader(sctx, sel);
+ si_update_ps_colorbuf0_slot(sctx);
}
static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
{
if (shader->is_optimized) {
util_queue_drop_job(&sctx->screen->shader_compiler_queue_low_priority,
- &shader->optimized_ready);
- util_queue_fence_destroy(&shader->optimized_ready);
+ &shader->ready);
}
+ util_queue_fence_destroy(&shader->ready);
+
if (shader->pm4) {
switch (shader->selector->type) {
case PIPE_SHADER_VERTEX:
free(shader);
}
-static void si_destroy_shader_selector(struct si_context *sctx,
- struct si_shader_selector *sel)
+void si_destroy_shader_selector(struct si_context *sctx,
+ struct si_shader_selector *sel)
{
struct si_shader *p = sel->first_variant, *c;
struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
util_queue_fence_destroy(&sel->ready);
mtx_destroy(&sel->mutex);
free(sel->tokens);
+ ralloc_free(sel->nir);
free(sel);
}
struct si_pm4_state *pm4;
/* Chip constants. */
- unsigned num_se = sctx->screen->b.info.max_se;
+ unsigned num_se = sctx->screen->info.max_se;
unsigned wave_size = 64;
unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
/* On SI-CI, the value comes from VGT_GS_VERTEX_REUSE = 16.
if (update_esgs) {
pipe_resource_reference(&sctx->esgs_ring, NULL);
sctx->esgs_ring =
- r600_aligned_buffer_create(sctx->b.b.screen,
+ si_aligned_buffer_create(sctx->b.b.screen,
R600_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
esgs_ring_size, alignment);
if (update_gsvs) {
pipe_resource_reference(&sctx->gsvs_ring, NULL);
sctx->gsvs_ring =
- r600_aligned_buffer_create(sctx->b.b.screen,
+ si_aligned_buffer_create(sctx->b.b.screen,
R600_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
gsvs_ring_size, alignment);
/* Flush the context to re-emit both init_config states. */
sctx->b.initial_gfx_cs_size = 0; /* force flush */
- si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+ si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
/* Set ring bindings. */
if (sctx->esgs_ring) {
r600_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer = (struct r600_resource*)
- r600_aligned_buffer_create(&sctx->screen->b.b,
+ si_aligned_buffer_create(&sctx->screen->b,
R600_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
scratch_needed_size, 256);
return false;
si_mark_atom_dirty(sctx, &sctx->scratch_state);
- r600_context_add_resource_size(&sctx->b.b,
- &sctx->scratch_buffer->b.b);
+ si_context_add_resource_size(&sctx->b.b,
+ &sctx->scratch_buffer->b.b);
}
if (!si_update_scratch_relocs(sctx))
static void si_init_tess_factor_ring(struct si_context *sctx)
{
- bool double_offchip_buffers = sctx->b.chip_class >= CIK &&
- sctx->b.family != CHIP_CARRIZO &&
- sctx->b.family != CHIP_STONEY;
- unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
- unsigned max_offchip_buffers = max_offchip_buffers_per_se *
- sctx->screen->b.info.max_se;
- unsigned offchip_granularity;
+ assert(!sctx->tess_rings);
- switch (sctx->screen->tess_offchip_block_dw_size) {
- default:
- assert(0);
- /* fall through */
- case 8192:
- offchip_granularity = V_03093C_X_8K_DWORDS;
- break;
- case 4096:
- offchip_granularity = V_03093C_X_4K_DWORDS;
- break;
- }
-
- switch (sctx->b.chip_class) {
- case SI:
- max_offchip_buffers = MIN2(max_offchip_buffers, 126);
- break;
- case CIK:
- case VI:
- case GFX9:
- max_offchip_buffers = MIN2(max_offchip_buffers, 508);
- break;
- default:
- assert(0);
- return;
- }
-
- assert(!sctx->tf_ring);
- /* Use 64K alignment for both rings, so that we can pass the address
- * to shaders as one SGPR containing bits [16:47].
+ /* The address must be aligned to 2^19, because the shader only
+ * receives the high 13 bits.
*/
- sctx->tf_ring = r600_aligned_buffer_create(sctx->b.b.screen,
- R600_RESOURCE_FLAG_UNMAPPABLE,
- PIPE_USAGE_DEFAULT,
- 32768 * sctx->screen->b.info.max_se,
- 64 * 1024);
- if (!sctx->tf_ring)
- return;
-
- assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);
-
- sctx->tess_offchip_ring =
- r600_aligned_buffer_create(sctx->b.b.screen,
- R600_RESOURCE_FLAG_UNMAPPABLE,
- PIPE_USAGE_DEFAULT,
- max_offchip_buffers *
- sctx->screen->tess_offchip_block_dw_size * 4,
- 64 * 1024);
- if (!sctx->tess_offchip_ring)
+ sctx->tess_rings = si_aligned_buffer_create(sctx->b.b.screen,
+ R600_RESOURCE_FLAG_32BIT,
+ PIPE_USAGE_DEFAULT,
+ sctx->screen->tess_offchip_ring_size +
+ sctx->screen->tess_factor_ring_size,
+ 1 << 19);
+ if (!sctx->tess_rings)
return;
si_init_config_add_vgt_flush(sctx);
- uint64_t offchip_va = r600_resource(sctx->tess_offchip_ring)->gpu_address;
- uint64_t factor_va = r600_resource(sctx->tf_ring)->gpu_address;
- assert((offchip_va & 0xffff) == 0);
- assert((factor_va & 0xffff) == 0);
-
- si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tess_offchip_ring),
- RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
- si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tf_ring),
+ si_pm4_add_bo(sctx->init_config, r600_resource(sctx->tess_rings),
RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RINGS);
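+ /* Both rings share one buffer: the tess factor ring starts right after the offchip ring. */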
+ uint64_t factor_va = r600_resource(sctx->tess_rings)->gpu_address +
+ sctx->screen->tess_offchip_ring_size;
+
/* Append these registers to the init config state. */
if (sctx->b.chip_class >= CIK) {
- if (sctx->b.chip_class >= VI)
- --max_offchip_buffers;
-
si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
- S_030938_SIZE(sctx->tf_ring->width0 / 4));
+ S_030938_SIZE(sctx->screen->tess_factor_ring_size / 4));
si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
factor_va >> 8);
if (sctx->b.chip_class >= GFX9)
si_pm4_set_reg(sctx->init_config, R_030944_VGT_TF_MEMORY_BASE_HI,
- factor_va >> 40);
+ S_030944_BASE_HI(factor_va >> 40));
si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
- S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
- S_03093C_OFFCHIP_GRANULARITY(offchip_granularity));
+ sctx->screen->vgt_hs_offchip_param);
} else {
- assert(offchip_granularity == V_03093C_X_8K_DWORDS);
si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
- S_008988_SIZE(sctx->tf_ring->width0 / 4));
+ S_008988_SIZE(sctx->screen->tess_factor_ring_size / 4));
si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
factor_va >> 8);
si_pm4_set_reg(sctx->init_config, R_0089B0_VGT_HS_OFFCHIP_PARAM,
- S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers));
- }
-
- if (sctx->b.chip_class >= GFX9) {
- si_pm4_set_reg(sctx->init_config,
- R_00B430_SPI_SHADER_USER_DATA_LS_0 +
- GFX9_SGPR_TCS_OFFCHIP_ADDR_BASE64K * 4,
- offchip_va >> 16);
- si_pm4_set_reg(sctx->init_config,
- R_00B430_SPI_SHADER_USER_DATA_LS_0 +
- GFX9_SGPR_TCS_FACTOR_ADDR_BASE64K * 4,
- factor_va >> 16);
- } else {
- si_pm4_set_reg(sctx->init_config,
- R_00B430_SPI_SHADER_USER_DATA_HS_0 +
- GFX6_SGPR_TCS_OFFCHIP_ADDR_BASE64K * 4,
- offchip_va >> 16);
- si_pm4_set_reg(sctx->init_config,
- R_00B430_SPI_SHADER_USER_DATA_HS_0 +
- GFX6_SGPR_TCS_FACTOR_ADDR_BASE64K * 4,
- factor_va >> 16);
+ sctx->screen->vgt_hs_offchip_param);
}
/* Flush the context to re-emit the init_config state.
*/
si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
sctx->b.initial_gfx_cs_size = 0; /* force flush */
- si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+ si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
}
/**
struct si_compiler_ctx_state compiler_state;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
struct si_shader *old_vs = si_get_vs_state(sctx);
- bool old_clip_disable = old_vs ? old_vs->key.opt.hw_vs.clip_disable : false;
+ bool old_clip_disable = old_vs ? old_vs->key.opt.clip_disable : false;
struct si_shader *old_ps = sctx->ps_shader.current;
unsigned old_spi_shader_col_format =
old_ps ? old_ps->key.part.ps.epilog.spi_shader_col_format : 0;
int r;
compiler_state.tm = sctx->tm;
- compiler_state.debug = sctx->b.debug;
+ compiler_state.debug = sctx->debug;
compiler_state.is_debug_context = sctx->is_debug;
/* Update stages before GS. */
if (sctx->tes_shader.cso) {
- if (!sctx->tf_ring) {
+ if (!sctx->tess_rings) {
si_init_tess_factor_ring(sctx);
- if (!sctx->tf_ring)
+ if (!sctx->tess_rings)
return false;
}
si_update_vgt_shader_config(sctx);
- if (old_clip_disable != si_get_vs_state(sctx)->key.opt.hw_vs.clip_disable)
+ if (old_clip_disable != si_get_vs_state(sctx)->key.opt.clip_disable)
si_mark_atom_dirty(sctx, &sctx->clip_regs);
if (sctx->ps_shader.cso) {
si_mark_atom_dirty(sctx, &sctx->spi_map);
}
- if (sctx->screen->b.rbplus_allowed &&
+ if (sctx->screen->rbplus_allowed &&
si_pm4_state_changed(sctx, ps) &&
(!old_ps ||
old_spi_shader_col_format !=
if (sctx->ps_db_shader_control != db_shader_control) {
sctx->ps_db_shader_control = db_shader_control;
si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ if (sctx->screen->dpbb_allowed)
+ si_mark_atom_dirty(sctx, &sctx->dpbb_state);
}
if (sctx->smoothing_enabled != sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing) {
}
}
- if (si_pm4_state_changed(sctx, ls) ||
- si_pm4_state_changed(sctx, hs) ||
- si_pm4_state_changed(sctx, es) ||
- si_pm4_state_changed(sctx, gs) ||
- si_pm4_state_changed(sctx, vs) ||
- si_pm4_state_changed(sctx, ps)) {
+ if (si_pm4_state_enabled_and_changed(sctx, ls) ||
+ si_pm4_state_enabled_and_changed(sctx, hs) ||
+ si_pm4_state_enabled_and_changed(sctx, es) ||
+ si_pm4_state_enabled_and_changed(sctx, gs) ||
+ si_pm4_state_enabled_and_changed(sctx, vs) ||
+ si_pm4_state_enabled_and_changed(sctx, ps)) {
if (!si_update_spi_tmpring_size(sctx))
return false;
}
- if (sctx->b.chip_class >= CIK)
- si_mark_atom_dirty(sctx, &sctx->prefetch_L2);
+ if (sctx->b.chip_class >= CIK) {
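+ /* Set a stage's prefetch bit when its enabled PM4 state changed; clear it when the stage is disabled. */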
+ if (si_pm4_state_enabled_and_changed(sctx, ls))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_LS;
+ else if (!sctx->queued.named.ls)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_LS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, hs))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_HS;
+ else if (!sctx->queued.named.hs)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_HS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, es))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_ES;
+ else if (!sctx->queued.named.es)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_ES;
+
+ if (si_pm4_state_enabled_and_changed(sctx, gs))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_GS;
+ else if (!sctx->queued.named.gs)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_GS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, vs))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_VS;
+ else if (!sctx->queued.named.vs)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_VS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, ps))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_PS;
+ else if (!sctx->queued.named.ps)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_PS;
+ }
sctx->do_update_shaders = false;
return true;
}
}
+void *si_get_blit_vs(struct si_context *sctx, enum blitter_attrib_type type,
+ unsigned num_layers)
+{
+ struct pipe_context *pipe = &sctx->b.b;
+ unsigned vs_blit_property;
+ void **vs;
+
+ switch (type) {
+ case UTIL_BLITTER_ATTRIB_NONE:
+ vs = num_layers > 1 ? &sctx->vs_blit_pos_layered :
+ &sctx->vs_blit_pos;
+ vs_blit_property = SI_VS_BLIT_SGPRS_POS;
+ break;
+ case UTIL_BLITTER_ATTRIB_COLOR:
+ vs = num_layers > 1 ? &sctx->vs_blit_color_layered :
+ &sctx->vs_blit_color;
+ vs_blit_property = SI_VS_BLIT_SGPRS_POS_COLOR;
+ break;
+ case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
+ case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
+ assert(num_layers == 1);
+ vs = &sctx->vs_blit_texcoord;
+ vs_blit_property = SI_VS_BLIT_SGPRS_POS_TEXCOORD;
+ break;
+ default:
+ assert(0);
+ return NULL;
+ }
+ if (*vs)
+ return *vs;
+
+ struct ureg_program *ureg = ureg_create(PIPE_SHADER_VERTEX);
+ if (!ureg)
+ return NULL;
+
+ /* Tell the shader to load VS inputs from SGPRs: */
+ ureg_property(ureg, TGSI_PROPERTY_VS_BLIT_SGPRS, vs_blit_property);
+ ureg_property(ureg, TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION, true);
+
+ /* This is just a pass-through shader with 1-3 MOV instructions. */
+ ureg_MOV(ureg,
+ ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0),
+ ureg_DECL_vs_input(ureg, 0));
+
+ if (type != UTIL_BLITTER_ATTRIB_NONE) {
+ ureg_MOV(ureg,
+ ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0),
+ ureg_DECL_vs_input(ureg, 1));
+ }
+
+ if (num_layers > 1) {
+ struct ureg_src instance_id =
+ ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);
+ struct ureg_dst layer =
+ ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);
+
+ ureg_MOV(ureg, ureg_writemask(layer, TGSI_WRITEMASK_X),
+ ureg_scalar(instance_id, TGSI_SWIZZLE_X));
+ }
+ ureg_END(ureg);
+
+ *vs = ureg_create_shader_and_destroy(ureg, pipe);
+ return *vs;
+}
+
void si_init_shader_functions(struct si_context *sctx)
{
si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);