* The information about LDS and other non-compile-time parameters is then
* written to userdata SGPRs.
*/
-static void si_emit_derived_tess_state(struct si_context *sctx,
+static bool si_emit_derived_tess_state(struct si_context *sctx,
const struct pipe_draw_info *info,
unsigned *num_patches)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
struct si_shader *ls_current;
struct si_shader_selector *ls;
/* The TES pointer will only be used for sctx->last_tcs.
(!has_primid_instancing_bug ||
(sctx->last_tess_uses_primid == tess_uses_primid))) {
*num_patches = sctx->last_num_patches;
- return;
+ return false;
}
sctx->last_ls = ls_current;
num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
}
- input_vertex_size = num_tcs_inputs * 16;
+ input_vertex_size = ls->lshs_vertex_stride;
output_vertex_size = num_tcs_outputs * 16;
input_patch_size = num_tcs_input_cp * input_vertex_size;
* resource usage. Also ensures that the number of TCS input and output
* vertices per threadgroup is at most 256.
*/
- *num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
+ unsigned max_verts_per_patch = MAX2(num_tcs_input_cp, num_tcs_output_cp);
+ *num_patches = 256 / max_verts_per_patch;
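+ /* e.g. 4 input and 4 output control points give max_verts_per_patch = 4
+ * and *num_patches = 64, i.e. 256 vertices on each side; 3 and 3 give
+ * 85 patches = 255 vertices. */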
/* Make sure that the data fits in LDS. This assumes the shaders only
* use LDS for the inputs and outputs.
(sctx->screen->tess_offchip_block_dw_size * 4) /
output_patch_size);
- /* Not necessary for correctness, but improves performance. The
- * specific value is taken from the proprietary driver.
+ /* Not necessary for correctness, but improves performance.
+ * The hardware can do more, but the radeonsi shader constant is
+ * limited to 6 bits.
+ */
+ *num_patches = MIN2(*num_patches, 63); /* triangles: 3 full waves except 3 lanes */
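+ /* 63 is the largest value representable in the 6-bit constant; for
+ * triangles, 63 patches * 3 vertices = 189 lanes = 3 waves of 64
+ * minus 3 lanes, hence the note above. */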
+
+ /* When distributed tessellation is unsupported, switch between SEs
+ * at a higher frequency to compensate for it.
+ */
+ if (!sctx->screen->has_distributed_tess && sctx->screen->info.max_se > 1)
+ *num_patches = MIN2(*num_patches, 16); /* recommended */
+
+ /* Make sure that vector lanes are reasonably occupied. It probably
+ * doesn't matter much because this is LS-HS, and TES is likely to
+ * occupy significantly more CUs.
*/
- *num_patches = MIN2(*num_patches, 40);
+ unsigned temp_verts_per_tg = *num_patches * max_verts_per_patch;
+ if (temp_verts_per_tg > 64 && temp_verts_per_tg % 64 < 48)
+ *num_patches = (temp_verts_per_tg & ~63) / max_verts_per_patch;
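+ /* e.g. 40 patches of 5 vertices = 200 lanes would leave only 8 of 64
+ * lanes active in the last wave (200 % 64 = 8 < 48), so round down to
+ * 192 lanes -> 38 patches = 190 lanes (62 active in the last wave). */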
if (sctx->chip_class == SI) {
/* SI bug workaround, related to power management. Limit LS-HS
* threadgroups to only one wave.
*/
- unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
+ unsigned one_wave = 64 / max_verts_per_patch;
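+ /* e.g. with 4 vertices per patch, one_wave = 16 patches,
+ * i.e. 64 lanes = exactly one wave per threadgroup. */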
*num_patches = MIN2(*num_patches, one_wave);
}
ls_hs_config);
}
sctx->last_ls_hs_config = ls_hs_config;
+ return true; /* true if the context rolls */
}
+ return false;
}
static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
}
/* rast_prim is the primitive type after GS. */
-static void si_emit_rasterizer_prim_state(struct si_context *sctx)
+static bool si_emit_rasterizer_prim_state(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
enum pipe_prim_type rast_prim = sctx->current_rast_prim;
- struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;
+ struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
/* Skip this if not rendering lines. */
- if (rast_prim != PIPE_PRIM_LINES &&
- rast_prim != PIPE_PRIM_LINE_LOOP &&
- rast_prim != PIPE_PRIM_LINE_STRIP &&
- rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
- rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
- return;
+ if (!util_prim_is_lines(rast_prim))
+ return false;
if (rast_prim == sctx->last_rast_prim &&
rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
- return;
+ return false;
/* For lines, reset the stipple pattern at each primitive. Otherwise,
* reset the stipple pattern at each packet (line strips, line loops).
sctx->last_rast_prim = rast_prim;
sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
+ return true; /* true if the context rolls */
}
static void si_emit_vs_state(struct si_context *sctx,
}
if (sctx->current_vs_state != sctx->last_vs_state) {
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
+ /* For the API vertex shader (VS_STATE_INDEXED). */
radeon_set_sh_reg(cs,
sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
SI_SGPR_VS_STATE_BITS * 4,
sctx->current_vs_state);
+ /* For vertex color clamping, which is done in the last stage
+ * before the rasterizer. */
+ if (sctx->gs_shader.cso || sctx->tes_shader.cso) {
+ /* GS copy shader or TES if GS is missing. */
+ radeon_set_sh_reg(cs,
+ R_00B130_SPI_SHADER_USER_DATA_VS_0 +
+ SI_SGPR_VS_STATE_BITS * 4,
+ sctx->current_vs_state);
+ }
+
sctx->last_vs_state = sctx->current_vs_state;
}
}
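+/* The restart index is written to a context register. This helper is
+ * shared by si_emit_draw_registers and the Vega10/Raven scissor bug
+ * workaround in si_emit_all_states, which needs to know about every
+ * context register write before it happens. */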
+static inline bool si_prim_restart_index_changed(struct si_context *sctx,
+ const struct pipe_draw_info *info)
+{
+ return info->primitive_restart &&
+ (info->restart_index != sctx->last_restart_index ||
+ sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
+}
+
static void si_emit_draw_registers(struct si_context *sctx,
const struct pipe_draw_info *info,
unsigned num_patches)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
unsigned prim = si_conv_pipe_prim(info->mode);
unsigned ia_multi_vgt_param;
sctx->last_primitive_restart_en = info->primitive_restart;
}
- if (info->primitive_restart &&
- (info->restart_index != sctx->last_restart_index ||
- sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
+ if (si_prim_restart_index_changed(sctx, info)) {
radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
info->restart_index);
sctx->last_restart_index = info->restart_index;
unsigned index_offset)
{
struct pipe_draw_indirect_info *indirect = info->indirect;
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
uint32_t index_max_size = 0;
static void si_emit_surface_sync(struct si_context *sctx,
unsigned cp_coher_cntl)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
if (sctx->chip_class >= GFX9) {
/* Flush caches and wait for the caches to assert idle. */
void si_emit_cache_flush(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
uint32_t flags = sctx->flags;
uint32_t cp_coher_cntl = 0;
uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
unsigned skip_atom_mask)
{
+ unsigned num_patches = 0;
+ bool context_roll = false; /* set correctly for GFX9 only */
+
+ context_roll |= si_emit_rasterizer_prim_state(sctx);
+ if (sctx->tes_shader.cso)
+ context_roll |= si_emit_derived_tess_state(sctx, info, &num_patches);
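+ /* DrawTransformFeedback-style draws write the VGT_STRMOUT_DRAW_OPAQUE_*
+ * context registers in the draw packet path, so treat them as a
+ * context roll. */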
+ if (info->count_from_stream_output)
+ context_roll = true;
+
+ /* Vega10/Raven scissor bug workaround. When any context register is
+ * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
+ * registers must be written too.
+ */
+ if ((sctx->family == CHIP_VEGA10 || sctx->family == CHIP_RAVEN) &&
+ (context_roll ||
+ sctx->dirty_atoms & si_atoms_that_roll_context() ||
+ sctx->dirty_states & si_states_that_roll_context() ||
+ si_prim_restart_index_changed(sctx, info))) {
+ sctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.scissors);
+ }
+
/* Emit state atoms. */
unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
while (mask)
sctx->dirty_states = 0;
/* Emit draw states. */
- unsigned num_patches = 0;
-
- si_emit_rasterizer_prim_state(sctx);
- if (sctx->tes_shader.cso)
- si_emit_derived_tess_state(sctx, info, &num_patches);
si_emit_vs_state(sctx, info);
si_emit_draw_registers(sctx, info, num_patches);
}
return;
}
- if (unlikely(!sctx->vs_shader.cso)) {
- assert(0);
- return;
- }
- if (unlikely(!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard))) {
- assert(0);
- return;
- }
- if (unlikely(!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES))) {
+ if (unlikely(!sctx->vs_shader.cso ||
+ !rs ||
+ (!sctx->ps_shader.cso && !rs->rasterizer_discard) ||
+ (!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES)))) {
assert(0);
return;
}
rast_prim = info->mode;
if (rast_prim != sctx->current_rast_prim) {
- bool old_is_poly = sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES;
- bool new_is_poly = rast_prim >= PIPE_PRIM_TRIANGLES;
- if (old_is_poly != new_is_poly) {
- sctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(sctx, &sctx->atoms.s.scissors);
- }
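+ /* The guardband/discard computation differs for points and lines
+ * (their widths extend beyond the vertex positions), so switching
+ * between those and triangles must update the guardband. */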
+ if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
+ util_prim_is_points_or_lines(rast_prim))
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);
sctx->current_rast_prim = rast_prim;
sctx->do_update_shaders = true;
if (!si_upload_vertex_buffer_descriptors(sctx))
return;
- /* Vega10/Raven scissor bug workaround. This must be done before VPORT
- * scissor registers are changed. There is also a more efficient but
- * more involved alternative workaround.
- */
- if ((sctx->family == CHIP_VEGA10 || sctx->family == CHIP_RAVEN) &&
- si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)) {
- sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
- si_emit_cache_flush(sctx);
- }
-
/* Use optimal packet order based on whether we need to sync the pipeline. */
if (unlikely(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
SI_CONTEXT_FLUSH_AND_INV_DB |
case UTIL_BLITTER_ATTRIB_NONE:;
}
- pipe->bind_vs_state(pipe, si_get_blit_vs(sctx, type, num_instances));
+ pipe->bind_vs_state(pipe, si_get_blitter_vs(sctx, type, num_instances));
struct pipe_draw_info info = {};
info.mode = SI_PRIM_RECTANGLE_LIST;
void si_trace_emit(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
uint64_t va = sctx->current_saved_cs->trace_buf->gpu_address;
uint32_t trace_id = ++sctx->current_saved_cs->trace_id;