/* When distributed tessellation is unsupported, switch between SEs
* at a higher frequency to compensate for it.
*/
- if (!sctx->screen->has_distributed_tess && sctx->screen->info.max_se > 1)
+ if (!sctx->screen->info.has_distributed_tess && sctx->screen->info.max_se > 1)
*num_patches = MIN2(*num_patches, 16); /* recommended */
/* Make sure that vector lanes are reasonably occupied. It probably
partial_vs_wave = true;
/* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= GFX8) */
- if (sscreen->has_distributed_tess) {
+ if (sscreen->info.has_distributed_tess) {
if (key->u.uses_gs) {
if (sscreen->info.chip_class == GFX8)
partial_es_wave = true;
}
}
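+/* Line stipple applies to line primitives and to polygons rasterized as
+ * lines, but never to points. */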
+static bool si_is_line_stipple_enabled(struct si_context *sctx)
+{
+ struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
+
+ return rs->line_stipple_enable &&
+ sctx->current_rast_prim != PIPE_PRIM_POINTS &&
+ (rs->polygon_mode_is_lines ||
+ util_prim_is_lines(sctx->current_rast_prim));
+}
+
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
const struct pipe_draw_info *info,
enum pipe_prim_type prim,
si_num_prims_for_vertices(info, prim) < primgroup_size));
key.u.primitive_restart = primitive_restart;
key.u.count_from_stream_output = info->count_from_stream_output != NULL;
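+ /* Line stipple affects the VGT configuration, so it's part of the
+ * precomputed key (see si_init_ia_multi_vgt_param_table). */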
+ key.u.line_stipple_enabled = si_is_line_stipple_enabled(sctx);
ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index] |
S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);
struct radeon_cmdbuf *cs = sctx->gfx_cs;
enum pipe_prim_type rast_prim = sctx->current_rast_prim;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
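+ /* Remember the CS size, so that we can detect below whether any
+ * context registers were actually written. */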
+ unsigned initial_cdw = cs->current.cdw;
- if (likely(rast_prim == sctx->last_rast_prim &&
- rs->pa_sc_line_stipple == sctx->last_sc_line_stipple &&
- (sctx->chip_class <= GFX9 ||
- rs->flatshade_first == sctx->last_flatshade_first)))
- return;
-
- if (util_prim_is_lines(rast_prim)) {
+ if (unlikely(si_is_line_stipple_enabled(sctx))) {
/* For lines, reset the stipple pattern at each primitive. Otherwise,
* reset the stipple pattern at each packet (line strips, line loops).
*/
- radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
- rs->pa_sc_line_stipple |
- S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));
- sctx->context_roll = true;
+ unsigned value = rs->pa_sc_line_stipple |
+ S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2);
+
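+ /* radeon_opt_set_context_reg skips the write if the tracked register
+ * value doesn't change, avoiding an unnecessary context roll. */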
+ radeon_opt_set_context_reg(sctx, R_028A0C_PA_SC_LINE_STIPPLE,
+ SI_TRACKED_PA_SC_LINE_STIPPLE, value);
}
- unsigned gs_out = si_conv_prim_to_gs_out(sctx->current_rast_prim);
+ unsigned gs_out_prim = si_conv_prim_to_gs_out(rast_prim);
+ if (unlikely(gs_out_prim != sctx->last_gs_out_prim &&
+ (sctx->ngg || sctx->gs_shader.cso))) {
+ radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
+ sctx->last_gs_out_prim = gs_out_prim;
+ }
- if (rast_prim != sctx->last_rast_prim &&
- (sctx->ngg || sctx->gs_shader.cso)) {
- radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);
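+ /* If any register write above went through, the CS grew and the
+ * context was rolled. */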
+ if (initial_cdw != cs->current.cdw)
sctx->context_roll = true;
- if (sctx->chip_class >= GFX10) {
- sctx->current_vs_state &= C_VS_STATE_OUTPRIM;
- sctx->current_vs_state |= S_VS_STATE_OUTPRIM(gs_out);
- }
- }
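+ /* The OUTPRIM and PROVOKING_VTX_INDEX bits are only consumed by the
+ * NGG shader code, so they don't need updating otherwise. */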
+ if (sctx->ngg) {
+ unsigned vtx_index = rs->flatshade_first ? 0 : gs_out_prim;
- if (sctx->chip_class >= GFX10) {
- unsigned vtx_index = rs->flatshade_first ? 0 : gs_out;
- sctx->current_vs_state &= C_VS_STATE_PROVOKING_VTX_INDEX;
- sctx->current_vs_state |= S_VS_STATE_PROVOKING_VTX_INDEX(vtx_index);
+ sctx->current_vs_state &= C_VS_STATE_OUTPRIM &
+ C_VS_STATE_PROVOKING_VTX_INDEX;
+ sctx->current_vs_state |= S_VS_STATE_OUTPRIM(gs_out_prim) |
+ S_VS_STATE_PROVOKING_VTX_INDEX(vtx_index);
}
-
- sctx->last_rast_prim = rast_prim;
- sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
- sctx->last_flatshade_first = rs->flatshade_first;
}
static void si_emit_vs_state(struct si_context *sctx,
}
/* For NGG: */
- if (sctx->chip_class >= GFX10 &&
+ if (sctx->screen->use_ngg &&
sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] !=
R_00B230_SPI_SHADER_USER_DATA_GS_0) {
radeon_set_sh_reg(cs,
*/
static void gfx10_emit_ge_cntl(struct si_context *sctx, unsigned num_patches)
{
+ union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
unsigned ge_cntl;
if (sctx->ngg) {
- ge_cntl = si_get_vs_state(sctx)->ge_cntl |
- S_03096C_PACKET_TO_ONE_PA(sctx->ia_multi_vgt_param_key.u.line_stipple_enabled);
+ if (sctx->tes_shader.cso) {
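+ /* For tess, the primitive group is a group of patches and vertex
+ * grouping is disabled. */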
+ ge_cntl = S_03096C_PRIM_GRP_SIZE(num_patches) |
+ S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
+ S_03096C_BREAK_WAVE_AT_EOI(key.u.tess_uses_prim_id);
+ } else {
+ ge_cntl = si_get_vs_state(sctx)->ge_cntl;
+ }
} else {
- union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
unsigned primgroup_size;
- unsigned vertgroup_size;
+ unsigned vertgroup_size = 256; /* 256 = disable vertex grouping */
if (sctx->tes_shader.cso) {
primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
- vertgroup_size = 0;
} else if (sctx->gs_shader.cso) {
unsigned vgt_gs_onchip_cntl = sctx->gs_shader.current->ctx_reg.gs.vgt_gs_onchip_cntl;
primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
- vertgroup_size = G_028A44_ES_VERTS_PER_SUBGRP(vgt_gs_onchip_cntl);
} else {
primgroup_size = 128; /* recommended without a GS and tess */
- vertgroup_size = 0;
}
ge_cntl = S_03096C_PRIM_GRP_SIZE(primgroup_size) |
S_03096C_VERT_GRP_SIZE(vertgroup_size) |
- S_03096C_BREAK_WAVE_AT_EOI(key.u.uses_tess && key.u.tess_uses_prim_id) |
- S_03096C_PACKET_TO_ONE_PA(key.u.line_stipple_enabled);
+ S_03096C_BREAK_WAVE_AT_EOI(key.u.uses_tess && key.u.tess_uses_prim_id);
}
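+ /* Line stipple requires routing all packets to a single PA, so that
+ * the stipple pattern stays continuous across primitives. This is
+ * shared by the NGG and legacy paths. */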
+ ge_cntl |= S_03096C_PACKET_TO_ONE_PA(si_is_line_stipple_enabled(sctx));
+
if (ge_cntl != sctx->last_multi_vgt_param) {
radeon_set_uconfig_reg(sctx->gfx_cs, R_03096C_GE_CNTL, ge_cntl);
sctx->last_multi_vgt_param = ge_cntl;
bool primitive_restart)
{
struct radeon_cmdbuf *cs = sctx->gfx_cs;
- unsigned vgt_prim = si_conv_pipe_prim(info->mode);
+ unsigned vgt_prim = si_conv_pipe_prim(prim);
if (sctx->chip_class >= GFX10)
gfx10_emit_ge_cntl(sctx, num_patches);
instance_count, primitive_restart);
if (vgt_prim != sctx->last_prim) {
- if (sctx->chip_class >= GFX7)
+ if (sctx->chip_class >= GFX10)
+ radeon_set_uconfig_reg(cs, R_030908_VGT_PRIMITIVE_TYPE, vgt_prim);
+ else if (sctx->chip_class >= GFX7)
radeon_set_uconfig_reg_idx(cs, sctx->screen,
R_030908_VGT_PRIMITIVE_TYPE, 1, vgt_prim);
else
if (original_index_size) {
index_max_size = (indexbuf->width0 - index_offset) /
original_index_size;
+ /* Skip draw calls with 0-sized index buffers.
+ * They cause a hang on some chips, like Navi10-14.
+ */
+ if (!index_max_size)
+ return;
+
index_va = si_resource(indexbuf)->gpu_address + index_offset;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
}
/* We don't need these. */
- assert(!(flags & (SI_CONTEXT_VGT_FLUSH |
- SI_CONTEXT_VGT_STREAMOUT_SYNC |
+ assert(!(flags & (SI_CONTEXT_VGT_STREAMOUT_SYNC |
SI_CONTEXT_FLUSH_AND_INV_DB_META)));
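+ /* VGT_FLUSH is no longer filtered out by the assertion above. Emit
+ * the event here; it's needed e.g. when the GS/tess ring buffers
+ * change. */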
+ if (flags & SI_CONTEXT_VGT_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
+ }
+
if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
ctx->num_cb_cache_flushes++;
if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
}
if (flags & SI_CONTEXT_INV_VCACHE)
gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);
+
+ /* The L2 cache ops are:
+ * - INV: - invalidate lines that reflect memory (were loaded from memory)
+ * - don't touch lines that were overwritten (were stored by gfx clients)
+ * - WB: - don't touch lines that reflect memory
+ * - write back lines that were overwritten
+ * - WB | INV: - invalidate lines that reflect memory
+ * - write back lines that were overwritten
+ *
+ * GLM doesn't support WB alone. If WB is set, INV must be set too.
+ */
if (flags & SI_CONTEXT_INV_L2) {
/* Writeback and invalidate everything in L2. */
- gcr_cntl |= S_586_GL2_INV(1) | S_586_GLM_INV(1);
+ gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) |
+ S_586_GLM_INV(1) | S_586_GLM_WB(1);
ctx->num_L2_invalidates++;
} else if (flags & SI_CONTEXT_WB_L2) {
- /* Writeback but do not invalidate. */
- gcr_cntl |= S_586_GL2_WB(1);
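+ /* Writeback L2. GLM doesn't support WB alone (see the comment
+ * above), so writeback and invalidate metadata together. */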
+ gcr_cntl |= S_586_GL2_WB(1) |
+ S_586_GLM_WB(1) | S_586_GLM_INV(1);
+ } else if (flags & SI_CONTEXT_INV_L2_METADATA) {
+ gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
}
- if (flags & SI_CONTEXT_INV_L2_METADATA)
- gcr_cntl |= S_586_GLM_INV(1);
if (flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
return;
}
- if (unlikely(!sctx->vs_shader.cso ||
- !rs ||
+ struct si_shader_selector *vs = sctx->vs_shader.cso;
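+ /* Draws that bind fewer vertex elements than the VS fetches would
+ * read undefined attributes, so treat them as errors. */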
+ if (unlikely(!vs ||
+ sctx->num_vertex_elements < vs->num_vs_inputs ||
(!sctx->ps_shader.cso && !rs->rasterizer_discard) ||
(!!sctx->tes_shader.cso != (prim == PIPE_PRIM_PATCHES)))) {
assert(0);
}
if (sctx->tes_shader.cso &&
- sctx->screen->has_ls_vgpr_init_bug) {
+ sctx->screen->info.has_ls_vgpr_init_bug) {
/* Determine whether the LS VGPR fix should be applied.
*
* It is only required when num input CPs > num output CPs,
(!sctx->tes_shader.cso || pd_msg("uses tess")) &&
(!sctx->gs_shader.cso || pd_msg("uses GS")) &&
(!sctx->ps_shader.cso->info.uses_primid || pd_msg("PS uses PrimID")) &&
+ (!rs->polygon_mode_enabled || pd_msg("polygon mode")) &&
#if SI_PRIM_DISCARD_DEBUG /* same as cso->prim_discard_cs_allowed */
(!sctx->vs_shader.cso->info.uses_bindless_images || pd_msg("uses bindless images")) &&
(!sctx->vs_shader.cso->info.uses_bindless_samplers || pd_msg("uses bindless samplers")) &&
* written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
* registers must be written too.
*/
- bool has_gfx9_scissor_bug = sctx->screen->has_gfx9_scissor_bug;
unsigned masked_atoms = 0;
- if (has_gfx9_scissor_bug) {
+ if (sctx->screen->info.has_gfx9_scissor_bug) {
masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.scissors);
if (info->count_from_stream_output ||
if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond))
sctx->atoms.s.render_cond.emit(sctx);
- if (has_gfx9_scissor_bug &&
+ if (sctx->screen->info.has_gfx9_scissor_bug &&
(sctx->context_roll ||
si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
sctx->atoms.s.scissors.emit(sctx);
si_emit_all_states(sctx, info, prim, instance_count,
primitive_restart, masked_atoms);
- if (has_gfx9_scissor_bug &&
+ if (sctx->screen->info.has_gfx9_scissor_bug &&
(sctx->context_roll ||
si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
sctx->atoms.s.scissors.emit(sctx);
cik_emit_prefetch_L2(sctx, false);
}
+ /* Mark the displayable dcc buffer as dirty in order to update
+ * it on the next call to si_flush_resource. */
+ if (sctx->screen->info.use_display_dcc_with_retile_blit) {
+ /* Don't use si_update_fb_dirtiness_after_rendering because it'll
+ * cause unnecessary texture decompressions on each draw. */
+ unsigned displayable_dcc_cb_mask = sctx->framebuffer.displayable_dcc_cb_mask;
+ while (displayable_dcc_cb_mask) {
+ unsigned i = u_bit_scan(&displayable_dcc_cb_mask);
+ struct pipe_surface *surf = sctx->framebuffer.state.cbufs[i];
+ struct si_texture *tex = (struct si_texture*) surf->texture;
+ tex->displayable_dcc_dirty = true;
+ }
+ }
+
/* Clear the context roll flag after the draw call. */
sctx->context_roll = false;