* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Christian König <christian.koenig@amd.com>
- * Marek Olšák <maraeo@gmail.com>
*/
#include "si_pipe.h"
#include "tgsi/tgsi_ureg.h"
#include "util/hash_table.h"
#include "util/crc32.h"
+#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "ac_exp_param.h"
+#include "ac_shader_util.h"
/* SHADER_CACHE */
return false;
}
- if (sscreen->b.disk_shader_cache && insert_into_disk_cache) {
- disk_cache_compute_key(sscreen->b.disk_shader_cache, tgsi_binary,
+ if (sscreen->disk_shader_cache && insert_into_disk_cache) {
+ disk_cache_compute_key(sscreen->disk_shader_cache, tgsi_binary,
*((uint32_t *)tgsi_binary), key);
- disk_cache_put(sscreen->b.disk_shader_cache, key, hw_binary,
- *((uint32_t *) hw_binary));
+ disk_cache_put(sscreen->disk_shader_cache, key, hw_binary,
+ *((uint32_t *) hw_binary), NULL);
}
return true;
struct hash_entry *entry =
_mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
if (!entry) {
- if (sscreen->b.disk_shader_cache) {
+ if (sscreen->disk_shader_cache) {
unsigned char sha1[CACHE_KEY_SIZE];
size_t tg_size = *((uint32_t *) tgsi_binary);
- disk_cache_compute_key(sscreen->b.disk_shader_cache,
+ disk_cache_compute_key(sscreen->disk_shader_cache,
tgsi_binary, tg_size, sha1);
size_t binary_size;
uint8_t *buffer =
- disk_cache_get(sscreen->b.disk_shader_cache,
+ disk_cache_get(sscreen->disk_shader_cache,
sha1, &binary_size);
if (!buffer)
return false;
assert(!"Invalid radeonsi shader disk cache "
"item!");
- disk_cache_remove(sscreen->b.disk_shader_cache,
+ disk_cache_remove(sscreen->disk_shader_cache,
sha1);
free(buffer);
else
return false;
}
- p_atomic_inc(&sscreen->b.num_shader_cache_hits);
+ p_atomic_inc(&sscreen->num_shader_cache_hits);
return true;
}
topology = V_028B6C_OUTPUT_TRIANGLE_CW;
if (sscreen->has_distributed_tess) {
- if (sscreen->b.family == CHIP_FIJI ||
- sscreen->b.family >= CHIP_POLARIS10)
+ if (sscreen->info.family == CHIP_FIJI ||
+ sscreen->info.family >= CHIP_POLARIS10)
distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
else
distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
{
unsigned type = sel->type;
- if (sscreen->b.family < CHIP_POLARIS10)
+ if (sscreen->info.family < CHIP_POLARIS10)
return;
/* VS as VS, or VS as ES: */
unsigned vgpr_comp_cnt;
uint64_t va;
- assert(sscreen->b.chip_class <= VI);
+ assert(sscreen->info.chip_class <= VI);
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
- if (sscreen->b.chip_class >= GFX9) {
+ if (sscreen->info.chip_class >= GFX9) {
si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, va >> 40);
S_00B428_FLOAT_MODE(shader->config.float_mode) |
S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt));
- if (sscreen->b.chip_class <= VI) {
+ if (sscreen->info.chip_class <= VI) {
si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
shader->config.rsrc2);
}
uint64_t va;
unsigned oc_lds_en;
- assert(sscreen->b.chip_class <= VI);
+ assert(sscreen->info.chip_class <= VI);
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
*/
static uint32_t si_vgt_gs_mode(struct si_shader_selector *sel)
{
- enum chip_class chip_class = sel->screen->b.chip_class;
+ enum chip_class chip_class = sel->screen->info.chip_class;
unsigned gs_max_vert_out = sel->gs_max_out_vertices;
unsigned cut_mode;
/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
* Make sure we don't go over the maximum value.
*/
- max_gs_prims = MIN2(max_gs_prims,
- max_out_prims /
- (gs->gs_max_out_vertices * gs_num_invocations));
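+ /* gs_max_out_vertices may be 0; skip the clamp to avoid a division by zero. */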
+ if (gs->gs_max_out_vertices > 0) {
+ max_gs_prims = MIN2(max_gs_prims,
+ max_out_prims /
+ (gs->gs_max_out_vertices * gs_num_invocations));
+ }
assert(max_gs_prims > 0);
/* If the primitive has adjacency, halve the number of vertices
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
- if (sscreen->b.chip_class >= GFX9) {
+ if (sscreen->info.chip_class >= GFX9) {
unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
unsigned es_type = shader->key.part.gs.es->type;
unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
struct si_shader_selector *gs)
{
+ const struct tgsi_shader_info *info = &shader->selector->info;
struct si_pm4_state *pm4;
unsigned num_user_sgprs;
unsigned nparams, vgpr_comp_cnt;
uint64_t va;
unsigned oc_lds_en;
unsigned window_space =
- shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
- bool enable_prim_id = shader->key.mono.vs_export_prim_id || shader->selector->info.uses_primid;
+ info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
+ bool enable_prim_id = shader->key.mono.u.vs_export_prim_id || info->uses_primid;
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
* not sent again.
*/
if (!gs) {
- unsigned mode = 0;
+ unsigned mode = V_028A40_GS_OFF;
- /* PrimID needs GS scenario A.
- * GFX9 also needs it when ViewportIndex is enabled.
- */
- if (enable_prim_id ||
- (sscreen->b.chip_class >= GFX9 &&
- shader->selector->info.writes_viewport_index))
+ /* PrimID needs GS scenario A. */
+ if (enable_prim_id)
mode = V_028A40_GS_SCENARIO_A;
si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, S_028A40_MODE(mode));
si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
}
+ if (sscreen->info.chip_class <= VI) {
+ /* Reuse needs to be turned off if we write oViewport. */
+ si_pm4_set_reg(pm4, R_028AB4_VGT_REUSE_OFF,
+ S_028AB4_REUSE_OFF(info->writes_viewport_index));
+ }
+
va = shader->bo->gpu_address;
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
* StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
*/
vgpr_comp_cnt = enable_prim_id ? 2 : (shader->info.uses_instanceid ? 1 : 0);
- num_user_sgprs = SI_VS_NUM_USER_SGPR;
+
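+ /* Blit shaders take their inputs from user SGPRs starting at
+ * SI_SGPR_VS_BLIT_DATA rather than from vertex buffers. */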
+ if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS]) {
+ num_user_sgprs = SI_SGPR_VS_BLIT_DATA +
+ info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
+ } else {
+ num_user_sgprs = SI_VS_NUM_USER_SGPR;
+ }
} else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
vgpr_comp_cnt = enable_prim_id ? 3 : 2;
num_user_sgprs = SI_TES_NUM_USER_SGPR;
return value;
}
-static unsigned si_get_cb_shader_mask(unsigned spi_shader_col_format)
-{
- unsigned i, cb_shader_mask = 0;
-
- for (i = 0; i < 8; i++) {
- switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
- case V_028714_SPI_SHADER_ZERO:
- break;
- case V_028714_SPI_SHADER_32_R:
- cb_shader_mask |= 0x1 << (i * 4);
- break;
- case V_028714_SPI_SHADER_32_GR:
- cb_shader_mask |= 0x3 << (i * 4);
- break;
- case V_028714_SPI_SHADER_32_AR:
- cb_shader_mask |= 0x9 << (i * 4);
- break;
- case V_028714_SPI_SHADER_FP16_ABGR:
- case V_028714_SPI_SHADER_UNORM16_ABGR:
- case V_028714_SPI_SHADER_SNORM16_ABGR:
- case V_028714_SPI_SHADER_UINT16_ABGR:
- case V_028714_SPI_SHADER_SINT16_ABGR:
- case V_028714_SPI_SHADER_32_ABGR:
- cb_shader_mask |= 0xf << (i * 4);
- break;
- default:
- assert(0);
- }
- }
- return cb_shader_mask;
-}
-
static void si_shader_ps(struct si_shader *shader)
{
struct tgsi_shader_info *info = &shader->selector->info;
spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
spi_shader_col_format = si_get_spi_shader_col_format(shader);
- cb_shader_mask = si_get_cb_shader_mask(spi_shader_col_format);
+ cb_shader_mask = ac_get_cb_shader_mask(spi_shader_col_format);
/* Ensure that some export memory is always allocated, for two reasons:
*
si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT,
- si_get_spi_shader_z_format(info->writes_z,
+ ac_get_spi_shader_z_format(info->writes_z,
info->writes_stencil,
info->writes_samplemask));
if (!sctx->vertex_elements)
return;
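+ /* The divisor state is summarized as two per-element bitmasks:
+ * divisor == 1, and divisor fetched from a constant buffer. */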
+ prolog_key->instance_divisor_is_one =
+ sctx->vertex_elements->instance_divisor_is_one;
+ prolog_key->instance_divisor_is_fetched =
+ sctx->vertex_elements->instance_divisor_is_fetched;
+
+ /* Prefer a monolithic shader to allow scheduling divisions around
+ * VBO loads. */
+ if (prolog_key->instance_divisor_is_fetched)
+ key->opt.prefer_mono = 1;
+
unsigned count = MIN2(vs->info.num_inputs,
sctx->vertex_elements->count);
- for (unsigned i = 0; i < count; ++i) {
- prolog_key->instance_divisors[i] =
- sctx->vertex_elements->elements[i].instance_divisor;
- }
-
memcpy(key->mono.vs_fix_fetch, sctx->vertex_elements->fix_fetch, count);
}
{
struct si_shader_selector *ps = sctx->ps_shader.cso;
- key->opt.hw_vs.clip_disable =
+ key->opt.clip_disable =
sctx->queued.named.rasterizer->clip_plane_enable == 0 &&
(vs->info.clipdist_writemask ||
vs->info.writes_clipvertex) &&
uint64_t outputs_written = vs->outputs_written;
uint64_t inputs_read = 0;
- outputs_written &= ~0x3; /* ignore POSITION, PSIZE */
+ /* ignore POSITION, PSIZE */
+ outputs_written &= ~((1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_POSITION, 0)) |
+ (1ull << si_shader_io_get_unique_index(TGSI_SEMANTIC_PSIZE, 0)));
if (!ps_disabled) {
inputs_read = ps->inputs_read;
uint64_t linked = outputs_written & inputs_read;
- key->opt.hw_vs.kill_outputs = ~linked & outputs_written;
+ key->opt.kill_outputs = ~linked & outputs_written;
}
/* Compute the key for the hw shader variant */
si_shader_selector_key_hw_vs(sctx, sel, key);
if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
- key->mono.vs_export_prim_id = 1;
+ key->mono.u.vs_export_prim_id = 1;
}
break;
case PIPE_SHADER_TESS_CTRL:
si_shader_selector_key_vs(sctx, sctx->vs_shader.cso,
key, &key->part.tcs.ls_prolog);
key->part.tcs.ls = sctx->vs_shader.cso;
+
+ /* When the LS VGPR fix is needed, monolithic shaders
+ * can:
+ * - avoid initializing EXEC in both the LS prolog
+ * and the LS main part when !vs_needs_prolog
+ * - remove the fixup for unused input VGPRs
+ */
+ key->part.tcs.ls_prolog.ls_vgpr_fix = sctx->ls_vgpr_fix;
+
+ /* The LS output / HS input layout can be communicated
+ * directly instead of via user SGPRs for merged LS-HS.
+ * The LS VGPR fix prefers this too.
+ */
+ key->opt.prefer_mono = 1;
}
key->part.tcs.epilog.prim_mode =
sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
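+ /* If every TCS invocation defines the tess factors, only
+ * invocation 0 has to write them in the epilog. */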
+ key->part.tcs.epilog.invoc0_tess_factors_are_def =
+ sel->tcs_info.tessfactors_are_def_in_all_invocs;
key->part.tcs.epilog.tes_reads_tess_factors =
sctx->tes_shader.cso->info.reads_tess_factors;
if (sel == sctx->fixed_func_tcs_shader.cso)
- key->mono.ff_tcs_inputs_to_copy = sctx->vs_shader.cso->outputs_written;
+ key->mono.u.ff_tcs_inputs_to_copy = sctx->vs_shader.cso->outputs_written;
break;
case PIPE_SHADER_TESS_EVAL:
if (sctx->gs_shader.cso)
si_shader_selector_key_hw_vs(sctx, sel, key);
if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
- key->mono.vs_export_prim_id = 1;
+ key->mono.u.vs_export_prim_id = 1;
}
break;
case PIPE_SHADER_GEOMETRY:
sctx->framebuffer.spi_shader_col_format_alpha) |
(~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
sctx->framebuffer.spi_shader_col_format);
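+ /* Don't export colors for disabled render targets. */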
+ key->part.ps.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit;
/* The output for dual source blending should have
* the same format as the first output.
sctx->framebuffer.nr_samples <= 1;
key->part.ps.epilog.clamp_color = rs->clamp_fragment_color;
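+ /* The PS prolog uses log2(ps_iter_samples) to compute the
+ * sample mask input for per-sample shading. */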
+ if (sctx->ps_iter_samples > 1 &&
+ sel->info.reads_samplemask) {
+ key->part.ps.prolog.samplemask_log_ps_iter =
+ util_logbase2(util_next_power_of_two(sctx->ps_iter_samples));
+ }
+
if (rs->force_persample_interp &&
rs->multisample_enable &&
sctx->framebuffer.nr_samples > 1 &&
sel->info.uses_linear_center +
sel->info.uses_linear_centroid +
sel->info.uses_linear_sample > 1;
+
+ if (sel->info.opcode_count[TGSI_OPCODE_INTERP_SAMPLE])
+ key->mono.u.ps.interpolate_at_sample_force_center = 1;
}
}
default:
assert(0);
}
+
+ if (unlikely(sctx->screen->debug_flags & DBG(NO_OPT_VARIANT)))
+ memset(&key->opt, 0, sizeof(key->opt));
}
-static void si_build_shader_variant(void *job, int thread_index)
+static void si_build_shader_variant(struct si_shader *shader,
+ int thread_index,
+ bool low_priority)
{
- struct si_shader *shader = (struct si_shader *)job;
struct si_shader_selector *sel = shader->selector;
struct si_screen *sscreen = sel->screen;
LLVMTargetMachineRef tm;
int r;
if (thread_index >= 0) {
- assert(thread_index < ARRAY_SIZE(sscreen->tm));
- tm = sscreen->tm[thread_index];
+ if (low_priority) {
+ assert(thread_index < ARRAY_SIZE(sscreen->tm_low_priority));
+ tm = sscreen->tm_low_priority[thread_index];
+ } else {
+ assert(thread_index < ARRAY_SIZE(sscreen->tm));
+ tm = sscreen->tm[thread_index];
+ }
if (!debug->async)
debug = NULL;
} else {
+ assert(!low_priority);
tm = shader->compiler_ctx_state.tm;
}
si_shader_init_pm4_state(sscreen, shader);
}
+static void si_build_shader_variant_low_priority(void *job, int thread_index)
+{
+ struct si_shader *shader = (struct si_shader *)job;
+
+ assert(thread_index >= 0);
+
+ si_build_shader_variant(shader, thread_index, true);
+}
+
static const struct si_shader_key zeroed;
static bool si_check_missing_main_part(struct si_screen *sscreen,
if (!main_part)
return false;
+ /* We can leave the fence as permanently signaled because the
+ * main part becomes visible globally only after it has been
+ * compiled. */
+ util_queue_fence_init(&main_part->ready);
+
main_part->selector = sel;
main_part->key.as_es = key->as_es;
main_part->key.as_ls = key->as_ls;
return true;
}
-static void si_destroy_shader_selector(struct si_context *sctx,
- struct si_shader_selector *sel);
-
-static void si_shader_selector_reference(struct si_context *sctx,
- struct si_shader_selector **dst,
- struct si_shader_selector *src)
-{
- if (pipe_reference(&(*dst)->reference, &src->reference))
- si_destroy_shader_selector(sctx, *dst);
-
- *dst = src;
-}
-
/* Select the hw shader variant depending on the current state. */
static int si_shader_select_with_key(struct si_screen *sscreen,
struct si_shader_ctx_state *state,
struct si_shader *current = state->current;
struct si_shader *iter, *shader = NULL;
- if (unlikely(sscreen->b.debug_flags & DBG_NO_OPT_VARIANT)) {
- memset(&key->opt, 0, sizeof(key->opt));
- }
-
again:
/* Check if we don't need to change anything.
* This path is also used for most shaders that don't need multiple
* variants, it will cost just a computation of the key and this
* test. */
if (likely(current &&
- memcmp(¤t->key, key, sizeof(*key)) == 0 &&
- (!current->is_optimized ||
- util_queue_fence_is_signalled(¤t->optimized_ready))))
+ memcmp(¤t->key, key, sizeof(*key)) == 0)) {
+ if (unlikely(!util_queue_fence_is_signalled(¤t->ready))) {
+ if (current->is_optimized) {
+ memset(&key->opt, 0, sizeof(key->opt));
+ goto current_not_ready;
+ }
+
+ util_queue_fence_wait(¤t->ready);
+ }
+
return current->compilation_failed ? -1 : 0;
+ }
+current_not_ready:
/* This must be done before the mutex is locked, because async GS
* compilation calls this function too, and therefore must enter
/* Don't check the "current" shader. We checked it above. */
if (current != iter &&
memcmp(&iter->key, key, sizeof(*key)) == 0) {
- /* If it's an optimized shader and its compilation has
- * been started but isn't done, use the unoptimized
- * shader so as not to cause a stall due to compilation.
- */
- if (iter->is_optimized &&
- !util_queue_fence_is_signalled(&iter->optimized_ready)) {
- memset(&key->opt, 0, sizeof(key->opt));
- mtx_unlock(&sel->mutex);
- goto again;
+ mtx_unlock(&sel->mutex);
+
+ if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
+ /* If it's an optimized shader and its compilation has
+ * been started but isn't done, use the unoptimized
+ * shader so as not to cause a stall due to compilation.
+ */
+ if (iter->is_optimized) {
+ memset(&key->opt, 0, sizeof(key->opt));
+ goto again;
+ }
+
+ util_queue_fence_wait(&iter->ready);
}
if (iter->compilation_failed) {
- mtx_unlock(&sel->mutex);
return -1; /* skip the draw call */
}
state->current = iter;
- mtx_unlock(&sel->mutex);
return 0;
}
}
mtx_unlock(&sel->mutex);
return -ENOMEM;
}
+
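+ /* The new fence starts out signaled; it is reset below before the
+ * variant becomes visible to other threads. */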
+ util_queue_fence_init(&shader->ready);
+
shader->selector = sel;
shader->key = *key;
shader->compiler_ctx_state = *compiler_state;
/* If this is a merged shader, get the first shader's selector. */
- if (sscreen->b.chip_class >= GFX9) {
+ if (sscreen->info.chip_class >= GFX9) {
if (sel->type == PIPE_SHADER_TESS_CTRL)
previous_stage_sel = key->part.tcs.ls;
else if (sel->type == PIPE_SHADER_GEOMETRY)
previous_stage_sel = key->part.gs.es;
+
+ /* We need to wait for the previous shader. */
+ if (previous_stage_sel && thread_index < 0)
+ util_queue_fence_wait(&previous_stage_sel->ready);
}
/* Compile the main shader part if it doesn't exist. This can happen
else
assert(0);
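+ /* Creating the main part of the previous stage must be guarded
+ * by that selector's mutex. */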
+ mtx_lock(&previous_stage_sel->mutex);
ok = si_check_missing_main_part(sscreen,
previous_stage_sel,
compiler_state, &shader1_key);
+ mtx_unlock(&previous_stage_sel->mutex);
} else {
ok = si_check_missing_main_part(sscreen, sel,
compiler_state, key);
shader->is_optimized =
!is_pure_monolithic &&
memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
- if (shader->is_optimized)
- util_queue_fence_init(&shader->optimized_ready);
-
- if (!sel->last_variant) {
- sel->first_variant = shader;
- sel->last_variant = shader;
- } else {
- sel->last_variant->next_variant = shader;
- sel->last_variant = shader;
- }
/* If it's an optimized shader, compile it asynchronously. */
if (shader->is_optimized &&
!is_pure_monolithic &&
thread_index < 0) {
/* Compile it asynchronously. */
- util_queue_add_job(&sscreen->shader_compiler_queue,
- shader, &shader->optimized_ready,
- si_build_shader_variant, NULL);
+ util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
+ shader, &shader->ready,
+ si_build_shader_variant_low_priority, NULL);
+
+ /* Add only after the ready fence was reset, to guard against a
+ * race with si_bind_XX_shader. */
+ if (!sel->last_variant) {
+ sel->first_variant = shader;
+ sel->last_variant = shader;
+ } else {
+ sel->last_variant->next_variant = shader;
+ sel->last_variant = shader;
+ }
/* Use the default (unoptimized) shader for now. */
memset(&key->opt, 0, sizeof(key->opt));
goto again;
}
+ /* Reset the fence before adding to the variant list. */
+ util_queue_fence_reset(&shader->ready);
+
+ if (!sel->last_variant) {
+ sel->first_variant = shader;
+ sel->last_variant = shader;
+ } else {
+ sel->last_variant->next_variant = shader;
+ sel->last_variant = shader;
+ }
+
+ mtx_unlock(&sel->mutex);
+
assert(!shader->is_optimized);
- si_build_shader_variant(shader, thread_index);
+ si_build_shader_variant(shader, thread_index, false);
+
+ util_queue_fence_signal(&shader->ready);
if (!shader->compilation_failed)
state->current = shader;
- mtx_unlock(&sel->mutex);
return shader->compilation_failed ? -1 : 0;
}
}
static void si_parse_next_shader_property(const struct tgsi_shader_info *info,
+ bool streamout,
struct si_shader_key *key)
{
unsigned next_shader = info->properties[TGSI_PROPERTY_NEXT_SHADER];
key->as_ls = 1;
break;
default:
- /* If POSITION isn't written, it can't be a HW VS.
- * Assume that it's a HW LS. (the next shader is TCS)
+ /* If POSITION isn't written, it can only be a HW VS
+ * if streamout is used. If streamout isn't used,
+ * assume that it's a HW LS. (the next shader is TCS)
* This heuristic is needed for separate shader objects.
*/
- if (!info->writes_position)
+ if (!info->writes_position && !streamout)
key->as_ls = 1;
}
break;
* si_shader_selector initialization. Since it can be done asynchronously,
* there is no way to report compile failures to applications.
*/
-void si_init_shader_selector_async(void *job, int thread_index)
+static void si_init_shader_selector_async(void *job, int thread_index)
{
struct si_shader_selector *sel = (struct si_shader_selector *)job;
struct si_screen *sscreen = sel->screen;
struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
unsigned i;
- if (thread_index >= 0) {
- assert(thread_index < ARRAY_SIZE(sscreen->tm));
- tm = sscreen->tm[thread_index];
- if (!debug->async)
- debug = NULL;
- } else {
- tm = sel->compiler_ctx_state.tm;
- }
+ assert(!debug->debug_message || debug->async);
+ assert(thread_index >= 0);
+ assert(thread_index < ARRAY_SIZE(sscreen->tm));
+ tm = sscreen->tm[thread_index];
/* Compile the main shader part for use with a prolog and/or epilog.
* If this fails, the driver will try to compile a monolithic shader
*/
if (!sscreen->use_monolithic_shaders) {
struct si_shader *shader = CALLOC_STRUCT(si_shader);
- void *tgsi_binary;
+ void *tgsi_binary = NULL;
if (!shader) {
fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
return;
}
+ /* We can leave the fence signaled because use of the default
+ * main part is guarded by the selector's ready fence. */
+ util_queue_fence_init(&shader->ready);
+
shader->selector = sel;
- si_parse_next_shader_property(&sel->info, &shader->key);
+ si_parse_next_shader_property(&sel->info,
+ sel->so.num_outputs != 0,
+ &shader->key);
- tgsi_binary = si_get_tgsi_binary(sel);
+ if (sel->tokens)
+ tgsi_binary = si_get_tgsi_binary(sel);
/* Try to load the shader from the shader cache. */
mtx_lock(&sscreen->shader_cache_mutex);
}
/* Pre-compilation. */
- if (sscreen->b.debug_flags & DBG_PRECOMPILE) {
+ if (sscreen->debug_flags & DBG(PRECOMPILE) &&
+ /* GFX9 needs LS or ES for compilation, which we don't have here. */
+ (sscreen->info.chip_class <= VI ||
+ (sel->type != PIPE_SHADER_TESS_CTRL &&
+ sel->type != PIPE_SHADER_GEOMETRY))) {
struct si_shader_ctx_state state = {sel};
struct si_shader_key key;
memset(&key, 0, sizeof(key));
- si_parse_next_shader_property(&sel->info, &key);
+ si_parse_next_shader_property(&sel->info,
+ sel->so.num_outputs != 0,
+ &key);
+
+ /* GFX9 doesn't have LS and ES. */
+ if (sscreen->info.chip_class >= GFX9) {
+ key.as_ls = 0;
+ key.as_es = 0;
+ }
/* Set reasonable defaults, so that the shader key doesn't
* cause any code to be eliminated.
}
}
+/* Return descriptor slot usage masks from the given shader info. */
+void si_get_active_slot_masks(const struct tgsi_shader_info *info,
+ uint32_t *const_and_shader_buffers,
+ uint64_t *samplers_and_images)
+{
+ unsigned start, num_shaderbufs, num_constbufs, num_images, num_samplers;
+
+ num_shaderbufs = util_last_bit(info->shader_buffers_declared);
+ num_constbufs = util_last_bit(info->const_buffers_declared);
+ /* two 8-byte images share one 16-byte slot */
+ num_images = align(util_last_bit(info->images_declared), 2);
+ num_samplers = util_last_bit(info->samplers_declared);
+
+ /* The layout is: sb[last] ... sb[0], cb[0] ... cb[last] */
+ start = si_get_shaderbuf_slot(num_shaderbufs - 1);
+ *const_and_shader_buffers =
+ u_bit_consecutive(start, num_shaderbufs + num_constbufs);
+
+ /* The layout is: image[last] ... image[0], sampler[0] ... sampler[last] */
+ start = si_get_image_slot(num_images - 1) / 2;
+ *samplers_and_images =
+ u_bit_consecutive64(start, num_images / 2 + num_samplers);
+}
+
static void *si_create_shader_selector(struct pipe_context *ctx,
const struct pipe_shader_state *state)
{
pipe_reference_init(&sel->reference, 1);
sel->screen = sscreen;
- sel->compiler_ctx_state.tm = sctx->tm;
- sel->compiler_ctx_state.debug = sctx->b.debug;
+ sel->compiler_ctx_state.debug = sctx->debug;
sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
- sel->tokens = tgsi_dup_tokens(state->tokens);
- if (!sel->tokens) {
- FREE(sel);
- return NULL;
- }
sel->so = state->stream_output;
- tgsi_scan_shader(state->tokens, &sel->info);
+
+ if (state->type == PIPE_SHADER_IR_TGSI) {
+ sel->tokens = tgsi_dup_tokens(state->tokens);
+ if (!sel->tokens) {
+ FREE(sel);
+ return NULL;
+ }
+
+ tgsi_scan_shader(state->tokens, &sel->info);
+ tgsi_scan_tess_ctrl(state->tokens, &sel->info, &sel->tcs_info);
+ } else {
+ assert(state->type == PIPE_SHADER_IR_NIR);
+
+ sel->nir = state->ir.nir;
+
+ si_nir_scan_shader(sel->nir, &sel->info);
+
+ si_lower_nir(sel);
+ }
+
sel->type = sel->info.processor;
- p_atomic_inc(&sscreen->b.num_shaders_created);
+ p_atomic_inc(&sscreen->num_shaders_created);
+ si_get_active_slot_masks(&sel->info,
+ &sel->active_const_and_shader_buffers,
+ &sel->active_samplers_and_images);
+
+ /* Record which streamout buffers are enabled. */
+ for (i = 0; i < sel->so.num_outputs; i++) {
+ sel->enabled_streamout_buffer_mask |=
+ (1 << sel->so.output[i].output_buffer) <<
+ (sel->so.output[i].stream * 4);
+ }
/* The prolog is a no-op if there are no inputs. */
sel->vs_needs_prolog = sel->type == PIPE_SHADER_VERTEX &&
- sel->info.num_inputs;
+ sel->info.num_inputs &&
+ !sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
+
+ sel->force_correct_derivs_after_kill =
+ sel->type == PIPE_SHADER_FRAGMENT &&
+ sel->info.uses_derivatives &&
+ sel->info.uses_kill &&
+ sctx->screen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL);
/* Set which opcode uses which (i,j) pair. */
if (sel->info.uses_persp_opcode_interp_centroid)
case PIPE_SHADER_TESS_CTRL:
/* Always reserve space for these. */
sel->patch_outputs_written |=
- (1llu << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0)) |
- (1llu << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0));
+ (1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSINNER, 0)) |
+ (1ull << si_shader_io_get_unique_index_patch(TGSI_SEMANTIC_TESSOUTER, 0));
/* fall through */
case PIPE_SHADER_VERTEX:
case PIPE_SHADER_TESS_EVAL:
case TGSI_SEMANTIC_TESSOUTER:
case TGSI_SEMANTIC_PATCH:
sel->patch_outputs_written |=
- 1llu << si_shader_io_get_unique_index_patch(name, index);
+ 1ull << si_shader_io_get_unique_index_patch(name, index);
break;
case TGSI_SEMANTIC_GENERIC:
/* fall through */
default:
sel->outputs_written |=
- 1llu << si_shader_io_get_unique_index(name, index);
+ 1ull << si_shader_io_get_unique_index(name, index);
break;
case TGSI_SEMANTIC_CLIPVERTEX: /* ignore these */
case TGSI_SEMANTIC_EDGEFLAG:
/* fall through */
default:
sel->inputs_read |=
- 1llu << si_shader_io_get_unique_index(name, index);
+ 1ull << si_shader_io_get_unique_index(name, index);
break;
case TGSI_SEMANTIC_PCOORD: /* ignore this */
break;
break;
}
+ /* PA_CL_VS_OUT_CNTL */
+ bool misc_vec_ena =
+ sel->info.writes_psize || sel->info.writes_edgeflag ||
+ sel->info.writes_layer || sel->info.writes_viewport_index;
+ sel->pa_cl_vs_out_cntl =
+ S_02881C_USE_VTX_POINT_SIZE(sel->info.writes_psize) |
+ S_02881C_USE_VTX_EDGE_FLAG(sel->info.writes_edgeflag) |
+ S_02881C_USE_VTX_RENDER_TARGET_INDX(sel->info.writes_layer) |
+ S_02881C_USE_VTX_VIEWPORT_INDX(sel->info.writes_viewport_index) |
+ S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
+ S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena);
+ sel->clipdist_mask = sel->info.writes_clipvertex ?
+ SIX_BITS : sel->info.clipdist_writemask;
+ sel->culldist_mask = sel->info.culldist_writemask <<
+ sel->info.num_written_clipdistance;
+
/* DB_SHADER_CONTROL */
sel->db_shader_control =
S_02880C_Z_EXPORT_ENABLE(sel->info.writes_z) |
(void) mtx_init(&sel->mutex, mtx_plain);
util_queue_fence_init(&sel->ready);
- if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
- sctx->is_debug ||
- r600_can_dump_shader(&sscreen->b, sel->info.processor))
- si_init_shader_selector_async(sel, -1);
- else
- util_queue_add_job(&sscreen->shader_compiler_queue, sel,
- &sel->ready, si_init_shader_selector_async,
- NULL);
+ struct util_async_debug_callback async_debug;
+ bool wait =
+ (sctx->debug.debug_message && !sctx->debug.async) ||
+ sctx->is_debug ||
+ si_can_dump_shader(sscreen, sel->info.processor);
+
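+ /* u_async_debug buffers messages emitted on compiler threads so
+ * they can be replayed into sctx->debug after the wait below. */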
+ if (wait) {
+ u_async_debug_init(&async_debug);
+ sel->compiler_ctx_state.debug = async_debug.base;
+ }
+
+ util_queue_add_job(&sscreen->shader_compiler_queue, sel,
+ &sel->ready, si_init_shader_selector_async,
+ NULL);
+
+ if (wait) {
+ util_queue_fence_wait(&sel->ready);
+ u_async_debug_drain(&async_debug, &sctx->debug);
+ u_async_debug_cleanup(&async_debug);
+ }
return sel;
}
+static void si_update_streamout_state(struct si_context *sctx)
+{
+ struct si_shader_selector *shader_with_so = si_get_vs(sctx)->cso;
+
+ if (!shader_with_so)
+ return;
+
+ sctx->streamout.enabled_stream_buffers_mask =
+ shader_with_so->enabled_streamout_buffer_mask;
+ sctx->streamout.stride_in_dw = shader_with_so->so.stride;
+}
+
+static void si_update_clip_regs(struct si_context *sctx,
+ struct si_shader_selector *old_hw_vs,
+ struct si_shader *old_hw_vs_variant,
+ struct si_shader_selector *next_hw_vs,
+ struct si_shader *next_hw_vs_variant)
+{
+ if (next_hw_vs &&
+ (!old_hw_vs ||
+ old_hw_vs->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] !=
+ next_hw_vs->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] ||
+ old_hw_vs->pa_cl_vs_out_cntl != next_hw_vs->pa_cl_vs_out_cntl ||
+ old_hw_vs->clipdist_mask != next_hw_vs->clipdist_mask ||
+ old_hw_vs->culldist_mask != next_hw_vs->culldist_mask ||
+ !old_hw_vs_variant ||
+ !next_hw_vs_variant ||
+ old_hw_vs_variant->key.opt.clip_disable !=
+ next_hw_vs_variant->key.opt.clip_disable))
+ si_mark_atom_dirty(sctx, &sctx->clip_regs);
+}
+
+static void si_update_common_shader_state(struct si_context *sctx)
+{
+ sctx->uses_bindless_samplers =
+ si_shader_uses_bindless_samplers(sctx->vs_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->gs_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->ps_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->tcs_shader.cso) ||
+ si_shader_uses_bindless_samplers(sctx->tes_shader.cso);
+ sctx->uses_bindless_images =
+ si_shader_uses_bindless_images(sctx->vs_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->gs_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->ps_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->tcs_shader.cso) ||
+ si_shader_uses_bindless_images(sctx->tes_shader.cso);
+ sctx->do_update_shaders = true;
+}
+
static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
+ struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
struct si_shader_selector *sel = state;
if (sctx->vs_shader.cso == sel)
sctx->vs_shader.cso = sel;
sctx->vs_shader.current = sel ? sel->first_variant : NULL;
- sctx->do_update_shaders = true;
- si_mark_atom_dirty(sctx, &sctx->clip_regs);
- r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
+ sctx->num_vs_blit_sgprs = sel ? sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS] : 0;
+
+ si_update_common_shader_state(sctx);
+ si_update_vs_viewport_state(sctx);
+ si_set_active_descriptors_for_shader(sctx, sel);
+ si_update_streamout_state(sctx);
+ si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
+ si_get_vs(sctx)->cso, si_get_vs_state(sctx));
}
-static void si_update_tcs_tes_uses_prim_id(struct si_context *sctx)
+static void si_update_tess_uses_prim_id(struct si_context *sctx)
{
- sctx->ia_multi_vgt_param_key.u.tcs_tes_uses_prim_id =
+ sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id =
(sctx->tes_shader.cso &&
sctx->tes_shader.cso->info.uses_primid) ||
(sctx->tcs_shader.cso &&
static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
+ struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
struct si_shader_selector *sel = state;
bool enable_changed = !!sctx->gs_shader.cso != !!sel;
sctx->gs_shader.cso = sel;
sctx->gs_shader.current = sel ? sel->first_variant : NULL;
sctx->ia_multi_vgt_param_key.u.uses_gs = sel != NULL;
- sctx->do_update_shaders = true;
- si_mark_atom_dirty(sctx, &sctx->clip_regs);
+
+ si_update_common_shader_state(sctx);
sctx->last_rast_prim = -1; /* reset this so that it gets updated */
if (enable_changed) {
si_shader_change_notify(sctx);
if (sctx->ia_multi_vgt_param_key.u.uses_tess)
- si_update_tcs_tes_uses_prim_id(sctx);
+ si_update_tess_uses_prim_id(sctx);
}
- r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
+ si_update_vs_viewport_state(sctx);
+ si_set_active_descriptors_for_shader(sctx, sel);
+ si_update_streamout_state(sctx);
+ si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
+ si_get_vs(sctx)->cso, si_get_vs_state(sctx));
}
static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
sctx->tcs_shader.cso = sel;
sctx->tcs_shader.current = sel ? sel->first_variant : NULL;
- si_update_tcs_tes_uses_prim_id(sctx);
- sctx->do_update_shaders = true;
+ si_update_tess_uses_prim_id(sctx);
+
+ si_update_common_shader_state(sctx);
if (enable_changed)
sctx->last_tcs = NULL; /* invalidate derived tess state */
+
+ si_set_active_descriptors_for_shader(sctx, sel);
}
static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_shader_selector *old_hw_vs = si_get_vs(sctx)->cso;
+ struct si_shader *old_hw_vs_variant = si_get_vs_state(sctx);
struct si_shader_selector *sel = state;
bool enable_changed = !!sctx->tes_shader.cso != !!sel;
sctx->tes_shader.cso = sel;
sctx->tes_shader.current = sel ? sel->first_variant : NULL;
sctx->ia_multi_vgt_param_key.u.uses_tess = sel != NULL;
- si_update_tcs_tes_uses_prim_id(sctx);
- sctx->do_update_shaders = true;
- si_mark_atom_dirty(sctx, &sctx->clip_regs);
+ si_update_tess_uses_prim_id(sctx);
+
+ si_update_common_shader_state(sctx);
sctx->last_rast_prim = -1; /* reset this so that it gets updated */
if (enable_changed) {
si_shader_change_notify(sctx);
sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
}
- r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
+ si_update_vs_viewport_state(sctx);
+ si_set_active_descriptors_for_shader(sctx, sel);
+ si_update_streamout_state(sctx);
+ si_update_clip_regs(sctx, old_hw_vs, old_hw_vs_variant,
+ si_get_vs(sctx)->cso, si_get_vs_state(sctx));
}
static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_shader_selector *old_sel = sctx->ps_shader.cso;
struct si_shader_selector *sel = state;
/* skip if supplied shader is one already in use */
- if (sctx->ps_shader.cso == sel)
+ if (old_sel == sel)
return;
sctx->ps_shader.cso = sel;
sctx->ps_shader.current = sel ? sel->first_variant : NULL;
- sctx->do_update_shaders = true;
- if (sel && sctx->ia_multi_vgt_param_key.u.uses_tess)
- si_update_tcs_tes_uses_prim_id(sctx);
- si_mark_atom_dirty(sctx, &sctx->cb_render_state);
+
+ si_update_common_shader_state(sctx);
+ if (sel) {
+ if (sctx->ia_multi_vgt_param_key.u.uses_tess)
+ si_update_tess_uses_prim_id(sctx);
+
+ if (!old_sel ||
+ old_sel->info.colors_written != sel->info.colors_written)
+ si_mark_atom_dirty(sctx, &sctx->cb_render_state);
+
+ if (sctx->screen->has_out_of_order_rast &&
+ (!old_sel ||
+ old_sel->info.writes_memory != sel->info.writes_memory ||
+ old_sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL] !=
+ sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]))
+ si_mark_atom_dirty(sctx, &sctx->msaa_config);
+ }
+ si_set_active_descriptors_for_shader(sctx, sel);
}
static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
{
if (shader->is_optimized) {
- util_queue_fence_wait(&shader->optimized_ready);
- util_queue_fence_destroy(&shader->optimized_ready);
+ util_queue_drop_job(&sctx->screen->shader_compiler_queue_low_priority,
+ &shader->ready);
}
+ util_queue_fence_destroy(&shader->ready);
+
if (shader->pm4) {
switch (shader->selector->type) {
case PIPE_SHADER_VERTEX:
free(shader);
}
-static void si_destroy_shader_selector(struct si_context *sctx,
- struct si_shader_selector *sel)
+void si_destroy_shader_selector(struct si_context *sctx,
+ struct si_shader_selector *sel)
{
struct si_shader *p = sel->first_variant, *c;
struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
[PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
};
- util_queue_fence_wait(&sel->ready);
+ util_queue_drop_job(&sctx->screen->shader_compiler_queue, &sel->ready);
if (current_shader[sel->type]->cso == sel) {
current_shader[sel->type]->cso = NULL;
util_queue_fence_destroy(&sel->ready);
mtx_destroy(&sel->mutex);
free(sel->tokens);
+ ralloc_free(sel->nir);
free(sel);
}
struct si_pm4_state *pm4;
/* Chip constants. */
- unsigned num_se = sctx->screen->b.info.max_se;
+ unsigned num_se = sctx->screen->info.max_se;
unsigned wave_size = 64;
unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
/* On SI-CI, the value comes from VGT_GS_VERTEX_REUSE = 16.
if (update_esgs) {
pipe_resource_reference(&sctx->esgs_ring, NULL);
sctx->esgs_ring =
- r600_aligned_buffer_create(sctx->b.b.screen,
+ si_aligned_buffer_create(sctx->b.b.screen,
R600_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
esgs_ring_size, alignment);
if (update_gsvs) {
pipe_resource_reference(&sctx->gsvs_ring, NULL);
sctx->gsvs_ring =
- r600_aligned_buffer_create(sctx->b.b.screen,
+ si_aligned_buffer_create(sctx->b.b.screen,
R600_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
gsvs_ring_size, alignment);
/* Flush the context to re-emit both init_config states. */
sctx->b.initial_gfx_cs_size = 0; /* force flush */
- si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+ si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
/* Set ring bindings. */
if (sctx->esgs_ring) {
return shader ? shader->config.scratch_bytes_per_wave : 0;
}
+static struct si_shader *si_get_tcs_current(struct si_context *sctx)
+{
+ if (!sctx->tes_shader.cso)
+ return NULL; /* tessellation disabled */
+
+ return sctx->tcs_shader.cso ? sctx->tcs_shader.current :
+ sctx->fixed_func_tcs_shader.current;
+}
+
static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
unsigned bytes = 0;
bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
- bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tcs_shader.current));
bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
+
+ if (sctx->tes_shader.cso) {
+ struct si_shader *tcs = si_get_tcs_current(sctx);
+
+ bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(tcs));
+ }
return bytes;
}
static bool si_update_scratch_relocs(struct si_context *sctx)
{
+ struct si_shader *tcs = si_get_tcs_current(sctx);
int r;
/* Update the shaders, so that they are using the latest scratch.
if (r == 1)
si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
- r = si_update_scratch_buffer(sctx, sctx->tcs_shader.current);
+ r = si_update_scratch_buffer(sctx, tcs);
if (r < 0)
return false;
if (r == 1)
- si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
+ si_pm4_bind_state(sctx, hs, tcs->pm4);
/* VS can be bound as LS, ES, or VS. */
r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
r600_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer = (struct r600_resource*)
- r600_aligned_buffer_create(&sctx->screen->b.b,
+ si_aligned_buffer_create(&sctx->screen->b,
R600_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
scratch_needed_size, 256);
return false;
si_mark_atom_dirty(sctx, &sctx->scratch_state);
- r600_context_add_resource_size(&sctx->b.b,
- &sctx->scratch_buffer->b.b);
+ si_context_add_resource_size(&sctx->b.b,
+ &sctx->scratch_buffer->b.b);
}
if (!si_update_scratch_relocs(sctx))
bool double_offchip_buffers = sctx->b.chip_class >= CIK &&
sctx->b.family != CHIP_CARRIZO &&
sctx->b.family != CHIP_STONEY;
- unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
+ /* This must be one less than the maximum number due to a hw limitation.
+ * Various hardware bugs in SI, CIK, and GFX9 need this.
+ */
+ unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 127 : 63;
unsigned max_offchip_buffers = max_offchip_buffers_per_se *
- sctx->screen->b.info.max_se;
+ sctx->screen->info.max_se;
unsigned offchip_granularity;
switch (sctx->screen->tess_offchip_block_dw_size) {
break;
}
- switch (sctx->b.chip_class) {
- case SI:
- max_offchip_buffers = MIN2(max_offchip_buffers, 126);
- break;
- case CIK:
- case VI:
- case GFX9:
- max_offchip_buffers = MIN2(max_offchip_buffers, 508);
- break;
- default:
- assert(0);
- return;
- }
-
assert(!sctx->tf_ring);
/* Use 64K alignment for both rings, so that we can pass the address
* to shaders as one SGPR containing bits [16:47].
*/
- sctx->tf_ring = r600_aligned_buffer_create(sctx->b.b.screen,
+ sctx->tf_ring = si_aligned_buffer_create(sctx->b.b.screen,
R600_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
- 32768 * sctx->screen->b.info.max_se,
+ 32768 * sctx->screen->info.max_se,
64 * 1024);
if (!sctx->tf_ring)
return;
assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);
sctx->tess_offchip_ring =
- r600_aligned_buffer_create(sctx->b.b.screen,
+ si_aligned_buffer_create(sctx->b.b.screen,
R600_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
max_offchip_buffers *
*/
si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
sctx->b.initial_gfx_cs_size = 0; /* force flush */
- si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+ si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
}
/**
si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}
-static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
-{
- struct pipe_stream_output_info *so = &shader->so;
- uint32_t enabled_stream_buffers_mask = 0;
- int i;
-
- for (i = 0; i < so->num_outputs; i++)
- enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) << (so->output[i].stream * 4);
- sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
- sctx->b.streamout.stride_in_dw = shader->so.stride;
-}
-
bool si_update_shaders(struct si_context *sctx)
{
struct pipe_context *ctx = (struct pipe_context*)sctx;
struct si_compiler_ctx_state compiler_state;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
struct si_shader *old_vs = si_get_vs_state(sctx);
- bool old_clip_disable = old_vs ? old_vs->key.opt.hw_vs.clip_disable : false;
+ bool old_clip_disable = old_vs ? old_vs->key.opt.clip_disable : false;
+ struct si_shader *old_ps = sctx->ps_shader.current;
+ unsigned old_spi_shader_col_format =
+ old_ps ? old_ps->key.part.ps.epilog.spi_shader_col_format : 0;
int r;
compiler_state.tm = sctx->tm;
- compiler_state.debug = sctx->b.debug;
+ compiler_state.debug = sctx->debug;
compiler_state.is_debug_context = sctx->is_debug;
/* Update stages before GS. */
if (r)
return false;
si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
- si_update_so(sctx, sctx->tes_shader.cso);
}
} else if (sctx->gs_shader.cso) {
if (sctx->b.chip_class <= VI) {
if (r)
return false;
si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
- si_update_so(sctx, sctx->vs_shader.cso);
-
si_pm4_bind_state(sctx, ls, NULL);
si_pm4_bind_state(sctx, hs, NULL);
}
return false;
si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4);
- si_update_so(sctx, sctx->gs_shader.cso);
if (!si_update_gs_ring_buffers(sctx))
return false;
si_update_vgt_shader_config(sctx);
- if (old_clip_disable != si_get_vs_state(sctx)->key.opt.hw_vs.clip_disable)
+ if (old_clip_disable != si_get_vs_state(sctx)->key.opt.clip_disable)
si_mark_atom_dirty(sctx, &sctx->clip_regs);
if (sctx->ps_shader.cso) {
si_mark_atom_dirty(sctx, &sctx->spi_map);
}
- if (sctx->screen->b.rbplus_allowed && si_pm4_state_changed(sctx, ps))
+ if (sctx->screen->rbplus_allowed &&
+ si_pm4_state_changed(sctx, ps) &&
+ (!old_ps ||
+ old_spi_shader_col_format !=
+ sctx->ps_shader.current->key.part.ps.epilog.spi_shader_col_format))
si_mark_atom_dirty(sctx, &sctx->cb_render_state);
if (sctx->ps_db_shader_control != db_shader_control) {
sctx->ps_db_shader_control = db_shader_control;
si_mark_atom_dirty(sctx, &sctx->db_render_state);
+ if (sctx->screen->dpbb_allowed)
+ si_mark_atom_dirty(sctx, &sctx->dpbb_state);
}
if (sctx->smoothing_enabled != sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing) {
}
}
- if (si_pm4_state_changed(sctx, ls) ||
- si_pm4_state_changed(sctx, hs) ||
- si_pm4_state_changed(sctx, es) ||
- si_pm4_state_changed(sctx, gs) ||
- si_pm4_state_changed(sctx, vs) ||
- si_pm4_state_changed(sctx, ps)) {
+ if (si_pm4_state_enabled_and_changed(sctx, ls) ||
+ si_pm4_state_enabled_and_changed(sctx, hs) ||
+ si_pm4_state_enabled_and_changed(sctx, es) ||
+ si_pm4_state_enabled_and_changed(sctx, gs) ||
+ si_pm4_state_enabled_and_changed(sctx, vs) ||
+ si_pm4_state_enabled_and_changed(sctx, ps)) {
if (!si_update_spi_tmpring_size(sctx))
return false;
}
- if (sctx->b.chip_class >= CIK)
- si_mark_atom_dirty(sctx, &sctx->prefetch_L2);
+ if (sctx->b.chip_class >= CIK) {
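+ /* Track the stages to prefetch into L2: set a bit when a bound
+ * shader changed, clear it when the stage is unbound. */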
+ if (si_pm4_state_enabled_and_changed(sctx, ls))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_LS;
+ else if (!sctx->queued.named.ls)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_LS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, hs))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_HS;
+ else if (!sctx->queued.named.hs)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_HS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, es))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_ES;
+ else if (!sctx->queued.named.es)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_ES;
+
+ if (si_pm4_state_enabled_and_changed(sctx, gs))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_GS;
+ else if (!sctx->queued.named.gs)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_GS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, vs))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_VS;
+ else if (!sctx->queued.named.vs)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_VS;
+
+ if (si_pm4_state_enabled_and_changed(sctx, ps))
+ sctx->prefetch_L2_mask |= SI_PREFETCH_PS;
+ else if (!sctx->queued.named.ps)
+ sctx->prefetch_L2_mask &= ~SI_PREFETCH_PS;
+ }
sctx->do_update_shaders = false;
return true;
}
}
+void *si_get_blit_vs(struct si_context *sctx, enum blitter_attrib_type type,
+ unsigned num_layers)
+{
+ struct pipe_context *pipe = &sctx->b.b;
+ unsigned vs_blit_property;
+ void **vs;
+
+ switch (type) {
+ case UTIL_BLITTER_ATTRIB_NONE:
+ vs = num_layers > 1 ? &sctx->vs_blit_pos_layered :
+ &sctx->vs_blit_pos;
+ vs_blit_property = SI_VS_BLIT_SGPRS_POS;
+ break;
+ case UTIL_BLITTER_ATTRIB_COLOR:
+ vs = num_layers > 1 ? &sctx->vs_blit_color_layered :
+ &sctx->vs_blit_color;
+ vs_blit_property = SI_VS_BLIT_SGPRS_POS_COLOR;
+ break;
+ case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
+ case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
+ assert(num_layers == 1);
+ vs = &sctx->vs_blit_texcoord;
+ vs_blit_property = SI_VS_BLIT_SGPRS_POS_TEXCOORD;
+ break;
+ default:
+ assert(0);
+ return NULL;
+ }
+ if (*vs)
+ return *vs;
+
+ struct ureg_program *ureg = ureg_create(PIPE_SHADER_VERTEX);
+ if (!ureg)
+ return NULL;
+
+ /* Tell the shader to load VS inputs from SGPRs: */
+ ureg_property(ureg, TGSI_PROPERTY_VS_BLIT_SGPRS, vs_blit_property);
+ ureg_property(ureg, TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION, true);
+
+ /* This is just a pass-through shader with 1-3 MOV instructions. */
+ ureg_MOV(ureg,
+ ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0),
+ ureg_DECL_vs_input(ureg, 0));
+
+ if (type != UTIL_BLITTER_ATTRIB_NONE) {
+ ureg_MOV(ureg,
+ ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0),
+ ureg_DECL_vs_input(ureg, 1));
+ }
+
+ if (num_layers > 1) {
+ struct ureg_src instance_id =
+ ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);
+ struct ureg_dst layer =
+ ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);
+
+ ureg_MOV(ureg, ureg_writemask(layer, TGSI_WRITEMASK_X),
+ ureg_scalar(instance_id, TGSI_SWIZZLE_X));
+ }
+ ureg_END(ureg);
+
+ *vs = ureg_create_shader_and_destroy(ureg, pipe);
+ return *vs;
+}
+
void si_init_shader_functions(struct si_context *sctx)
{
si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);