const struct pipe_draw_info *info,
unsigned *num_patches)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
struct si_shader *ls_current;
struct si_shader_selector *ls;
/* The TES pointer will only be used for sctx->last_tcs.
* It would be wrong to think that TCS = TES. */
num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
}
- input_vertex_size = num_tcs_inputs * 16;
+ input_vertex_size = ls->lshs_vertex_stride;
output_vertex_size = num_tcs_outputs * 16;
input_patch_size = num_tcs_input_cp * input_vertex_size;
* resource usage. Also ensures that the number of TCS input and
* output vertices per threadgroup is at most 256.
*/
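+ /* Example: with 4 input and 3 output control points,
+ * max_verts_per_patch = 4 and *num_patches = 256 / 4 = 64. */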
- *num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
+ unsigned max_verts_per_patch = MAX2(num_tcs_input_cp, num_tcs_output_cp);
+ *num_patches = 256 / max_verts_per_patch;
/* Make sure that the data fits in LDS. This assumes the shaders only
* use LDS for the inputs and outputs.
(sctx->screen->tess_offchip_block_dw_size * 4) /
output_patch_size);
- /* Not necessary for correctness, but improves performance. The
- * specific value is taken from the proprietary driver.
+ /* Not necessary for correctness, but improves performance.
+ * The hardware can do more, but the radeonsi shader constant is
+ * limited to 6 bits.
*/
- *num_patches = MIN2(*num_patches, 40);
+ *num_patches = MIN2(*num_patches, 63); /* triangles: 3 full waves except 3 lanes */
+
+ /* When distributed tessellation is unsupported, switch between SEs
+ * at a higher frequency to compensate for it.
+ */
+ if (!sctx->screen->has_distributed_tess && sctx->screen->info.max_se > 1)
+ *num_patches = MIN2(*num_patches, 16); /* recommended */
+
+ /* Make sure that vector lanes are reasonably occupied. It probably
+ * doesn't matter much because this is LS-HS, and TES is likely to
+ * occupy significantly more CUs.
+ */
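+ /* If the last 64-lane wave would be less than 48/64 full, round the
+ * vertex count down to a whole number of waves and recompute the
+ * patch count from that. */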
+ unsigned temp_verts_per_tg = *num_patches * max_verts_per_patch;
+ if (temp_verts_per_tg > 64 && temp_verts_per_tg % 64 < 48)
+ *num_patches = (temp_verts_per_tg & ~63) / max_verts_per_patch;
if (sctx->chip_class == SI) {
/* SI bug workaround, related to power management. Limit LS-HS
* threadgroups to only one wave.
*/
- unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
+ unsigned one_wave = 64 / max_verts_per_patch;
*num_patches = MIN2(*num_patches, one_wave);
}
switch (info->mode) {
case PIPE_PRIM_PATCHES:
return info->count / info->vertices_per_patch;
+ case PIPE_PRIM_POLYGON:
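+ /* A polygon is a single primitive as long as it has at least
+ * 3 vertices, hence the boolean result. */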
+ return info->count >= 3;
case SI_PRIM_RECTANGLE_LIST:
return info->count / 3;
default:
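+ /* Count the basic primitives after decomposition, e.g. a triangle
+ * strip with N vertices yields N - 2 triangles. */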
- return u_prims_for_vertices(info->mode, info->count);
+ return u_decomposed_prims_for_vertices(info->mode, info->count);
}
}
key->u.uses_gs)
partial_vs_wave = true;
- /* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
+ /* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= VI) */
if (sscreen->has_distributed_tess) {
if (key->u.uses_gs) {
- if (sscreen->info.chip_class <= VI)
+ if (sscreen->info.chip_class == VI)
partial_es_wave = true;
-
- /* GPU hang workaround. */
- if (sscreen->info.family == CHIP_TONGA ||
- sscreen->info.family == CHIP_FIJI ||
- sscreen->info.family == CHIP_POLARIS10 ||
- sscreen->info.family == CHIP_POLARIS11 ||
- sscreen->info.family == CHIP_POLARIS12 ||
- sscreen->info.family == CHIP_VEGAM)
- partial_vs_wave = true;
} else {
partial_vs_wave = true;
}
* Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
* for points, line strips, and tri strips.
*/
- if (sscreen->info.max_se < 4 ||
+ if (sscreen->info.max_se <= 2 ||
key->u.prim == PIPE_PRIM_POLYGON ||
key->u.prim == PIPE_PRIM_LINE_LOOP ||
key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
wd_switch_on_eop = true;
/* Required on CIK and later. */
- if (sscreen->info.max_se > 2 && !wd_switch_on_eop)
+ if (sscreen->info.max_se == 4 && !wd_switch_on_eop)
ia_switch_on_eoi = true;
+ /* HW engineers suggested that PARTIAL_VS_WAVE_ON should be set
+ * to work around a GS hang.
+ */
+ if (key->u.uses_gs &&
+ (sscreen->info.family == CHIP_TONGA ||
+ sscreen->info.family == CHIP_FIJI ||
+ sscreen->info.family == CHIP_POLARIS10 ||
+ sscreen->info.family == CHIP_POLARIS11 ||
+ sscreen->info.family == CHIP_POLARIS12 ||
+ sscreen->info.family == CHIP_VEGAM))
+ partial_vs_wave = true;
+
/* Required by Hawaii and, for some special cases, by VI. */
if (ia_switch_on_eoi &&
(sscreen->info.family == CHIP_HAWAII ||
key->u.uses_instancing)
partial_vs_wave = true;
+ /* This only applies to Polaris10 and later chips with 4 SEs;
+ * wd_switch_on_eop is already true on all other chips.
+ */
+ if (!wd_switch_on_eop && key->u.primitive_restart)
+ partial_vs_wave = true;
+
/* If the WD switch is false, the IA switch must be false too. */
assert(wd_switch_on_eop || !ia_switch_on_eop);
}
S_030960_EN_INST_OPT_ADV(sscreen->info.chip_class >= GFX9);
}
-void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
+static void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
{
for (int prim = 0; prim <= SI_PRIM_RECTANGLE_LIST; prim++)
for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
/* rast_prim is the primitive type after GS. */
static bool si_emit_rasterizer_prim_state(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
enum pipe_prim_type rast_prim = sctx->current_rast_prim;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
/* Skip this if not rendering lines. */
- if (rast_prim != PIPE_PRIM_LINES &&
- rast_prim != PIPE_PRIM_LINE_LOOP &&
- rast_prim != PIPE_PRIM_LINE_STRIP &&
- rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
- rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
+ if (!util_prim_is_lines(rast_prim))
return false;
if (rast_prim == sctx->last_rast_prim &&
}
if (sctx->current_vs_state != sctx->last_vs_state) {
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
+ /* For the API vertex shader (VS_STATE_INDEXED). */
radeon_set_sh_reg(cs,
sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
SI_SGPR_VS_STATE_BITS * 4,
sctx->current_vs_state);
+ /* For vertex color clamping, which is done in the last stage
+ * before the rasterizer. */
+ if (sctx->gs_shader.cso || sctx->tes_shader.cso) {
+ /* GS copy shader or TES if GS is missing. */
+ radeon_set_sh_reg(cs,
+ R_00B130_SPI_SHADER_USER_DATA_VS_0 +
+ SI_SGPR_VS_STATE_BITS * 4,
+ sctx->current_vs_state);
+ }
+
sctx->last_vs_state = sctx->current_vs_state;
}
}
const struct pipe_draw_info *info,
unsigned num_patches)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
unsigned prim = si_conv_pipe_prim(info->mode);
unsigned ia_multi_vgt_param;
/* Draw state. */
if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
if (sctx->chip_class >= GFX9)
- radeon_set_uconfig_reg_idx(cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param);
+ radeon_set_uconfig_reg_idx(cs, sctx->screen,
+ R_030960_IA_MULTI_VGT_PARAM, 4,
+ ia_multi_vgt_param);
else if (sctx->chip_class >= CIK)
radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
else
}
if (prim != sctx->last_prim) {
if (sctx->chip_class >= CIK)
- radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
+ radeon_set_uconfig_reg_idx(cs, sctx->screen,
+ R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
else
radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
unsigned index_offset)
{
struct pipe_draw_indirect_info *indirect = info->indirect;
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
uint32_t index_max_size = 0;
t->stride_in_dw);
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
COPY_DATA_DST_SEL(COPY_DATA_REG) |
COPY_DATA_WR_CONFIRM);
radeon_emit(cs, va); /* src address lo */
}
if (sctx->chip_class >= GFX9) {
- radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
- 2, index_type);
+ radeon_set_uconfig_reg_idx(cs, sctx->screen,
+ R_03090C_VGT_INDEX_TYPE, 2,
+ index_type);
} else {
radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
radeon_emit(cs, index_type);
radeon_emit(cs, di_src_sel);
}
} else {
+ unsigned instance_count = info->instance_count;
int base_vertex;
- radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
- radeon_emit(cs, info->instance_count);
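+ /* Skip redundant NUM_INSTANCES packets. The UNKNOWN check forces
+ * emission even if instance_count happens to equal the sentinel. */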
+ if (sctx->last_instance_count == SI_INSTANCE_COUNT_UNKNOWN ||
+ sctx->last_instance_count != instance_count) {
+ radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
+ radeon_emit(cs, instance_count);
+ sctx->last_instance_count = instance_count;
+ }
/* Base vertex and start instance. */
base_vertex = index_size ? info->index_bias : info->start;
static void si_emit_surface_sync(struct si_context *sctx,
unsigned cp_coher_cntl)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
if (sctx->chip_class >= GFX9) {
/* Flush caches and wait for the caches to assert idle. */
void si_emit_cache_flush(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
uint32_t flags = sctx->flags;
uint32_t cp_coher_cntl = 0;
uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
/* Necessary for DCC */
if (sctx->chip_class == VI)
- si_gfx_write_event_eop(sctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
- 0, EOP_DATA_SEL_DISCARD, NULL,
- 0, 0, SI_NOT_QUERY);
+ si_cp_release_mem(sctx,
+ V_028A90_FLUSH_AND_INV_CB_DATA_TS,
+ 0, EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
+ EOP_DATA_SEL_DISCARD, NULL,
+ 0, 0, SI_NOT_QUERY);
}
if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
sctx->compute_is_busy) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
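+ /* EVENT_INDEX is a separate field in the packet dword and must be
+ * ORed in outside of EVENT_TYPE(). */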
- radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH | EVENT_INDEX(4)));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
sctx->num_cs_flushes++;
sctx->compute_is_busy = false;
}
va = sctx->wait_mem_scratch->gpu_address;
sctx->wait_mem_number++;
- si_gfx_write_event_eop(sctx, cb_db_event, tc_flags,
- EOP_DATA_SEL_VALUE_32BIT,
- sctx->wait_mem_scratch, va,
- sctx->wait_mem_number, SI_NOT_QUERY);
- si_gfx_wait_fence(sctx, va, sctx->wait_mem_number, 0xffffffff);
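+ /* Release: write wait_mem_number to memory when the CB/DB event
+ * completes, then make the CP wait until that value appears. */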
+ si_cp_release_mem(sctx, cb_db_event, tc_flags,
+ EOP_DST_SEL_MEM,
+ EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
+ EOP_DATA_SEL_VALUE_32BIT,
+ sctx->wait_mem_scratch, va,
+ sctx->wait_mem_number, SI_NOT_QUERY);
+ si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff,
+ WAIT_REG_MEM_EQUAL);
}
/* Make sure ME is idle (it executes most packets) before continuing.
* This prevents read-after-write hazards.
*/
unsigned skip_atom_mask)
{
unsigned num_patches = 0;
+ /* Vega10/Raven scissor bug workaround. When any context register is
+ * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
+ * registers must be written too.
+ */
+ bool handle_scissor_bug = (sctx->family == CHIP_VEGA10 || sctx->family == CHIP_RAVEN) &&
+ !si_is_atom_dirty(sctx, &sctx->atoms.s.scissors);
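+ /* If the scissor atom is already dirty, it will be emitted with the
+ * other atoms anyway, so the workaround isn't needed. */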
bool context_roll = false; /* set correctly for GFX9 only */
context_roll |= si_emit_rasterizer_prim_state(sctx);
if (sctx->tes_shader.cso)
context_roll |= si_emit_derived_tess_state(sctx, info, &num_patches);
- if (info->count_from_stream_output)
+
+ if (handle_scissor_bug &&
+ (info->count_from_stream_output ||
+ sctx->dirty_atoms & si_atoms_that_always_roll_context() ||
+ sctx->dirty_states & si_states_that_always_roll_context() ||
+ si_prim_restart_index_changed(sctx, info)))
context_roll = true;
- /* Vega10/Raven scissor bug workaround. When any context register is
- * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
- * registers must be written too.
- */
- if ((sctx->family == CHIP_VEGA10 || sctx->family == CHIP_RAVEN) &&
- (context_roll ||
- sctx->dirty_atoms & si_atoms_that_roll_context() ||
- sctx->dirty_states & si_states_that_roll_context() ||
- si_prim_restart_index_changed(sctx, info))) {
- sctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(sctx, &sctx->atoms.s.scissors);
- }
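+ /* Reset the roll counter before emitting atoms; it's checked
+ * afterwards to detect context rolls during state emission. */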
+ sctx->context_roll_counter = 0;
/* Emit state atoms. */
unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
}
sctx->dirty_states = 0;
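+ /* If anything rolled the context while the atoms were emitted,
+ * write the scissor registers last (Vega10/Raven workaround). */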
+ if (handle_scissor_bug &&
+ (context_roll || sctx->context_roll_counter)) {
+ sctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
+ sctx->atoms.s.scissors.emit(sctx);
+ }
+
/* Emit draw states. */
si_emit_vs_state(sctx, info);
si_emit_draw_registers(sctx, info, num_patches);
}
-void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
+static void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
return;
}
- if (unlikely(!sctx->vs_shader.cso)) {
- assert(0);
- return;
- }
- if (unlikely(!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard))) {
- assert(0);
- return;
- }
- if (unlikely(!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES))) {
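+ /* Skip broken draws instead of crashing: a missing VS, a missing PS
+ * without rasterizer discard, or tess state that doesn't match the
+ * primitive type. */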
+ if (unlikely(!sctx->vs_shader.cso ||
+ !rs ||
+ (!sctx->ps_shader.cso && !rs->rasterizer_discard) ||
+ (!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES)))) {
assert(0);
return;
}
rast_prim = info->mode;
if (rast_prim != sctx->current_rast_prim) {
- bool old_is_poly = sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES;
- bool new_is_poly = rast_prim >= PIPE_PRIM_TRIANGLES;
- if (old_is_poly != new_is_poly)
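+ /* The guardband depends on whether the rasterized primitives are
+ * points/lines or triangles. */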
+ if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
+ util_prim_is_points_or_lines(rast_prim))
si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);
sctx->current_rast_prim = rast_prim;
pipe_resource_reference(&indexbuf, NULL);
}
-void si_draw_rectangle(struct blitter_context *blitter,
- void *vertex_elements_cso,
- blitter_get_vs_func get_vs,
- int x1, int y1, int x2, int y2,
- float depth, unsigned num_instances,
- enum blitter_attrib_type type,
- const union blitter_attrib *attrib)
+static void
+si_draw_rectangle(struct blitter_context *blitter,
+ void *vertex_elements_cso,
+ blitter_get_vs_func get_vs,
+ int x1, int y1, int x2, int y2,
+ float depth, unsigned num_instances,
+ enum blitter_attrib_type type,
+ const union blitter_attrib *attrib)
{
struct pipe_context *pipe = util_blitter_get_pipe(blitter);
struct si_context *sctx = (struct si_context*)pipe;
case UTIL_BLITTER_ATTRIB_NONE:;
}
- pipe->bind_vs_state(pipe, si_get_blit_vs(sctx, type, num_instances));
+ pipe->bind_vs_state(pipe, si_get_blitter_vs(sctx, type, num_instances));
struct pipe_draw_info info = {};
info.mode = SI_PRIM_RECTANGLE_LIST;
void si_trace_emit(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
uint64_t va = sctx->current_saved_cs->trace_buf->gpu_address;
uint32_t trace_id = ++sctx->current_saved_cs->trace_id;
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
- radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
+ radeon_emit(cs, S_370_DST_SEL(sctx->chip_class >= CIK ? V_370_MEM
+ : V_370_MEM_GRBM) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_ME));
radeon_emit(cs, va);
if (sctx->log)
u_log_flush(sctx->log);
}
+
+void si_init_draw_functions(struct si_context *sctx)
+{
+ sctx->b.draw_vbo = si_draw_vbo;
+
+ sctx->blitter->draw_rectangle = si_draw_rectangle;
+
+ si_init_ia_multi_vgt_param_table(sctx);
+}