*/
#include "si_build_pm4.h"
-#include "gfx9d.h"
+#include "sid.h"
#include "util/u_index_modify.h"
#include "util/u_log.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"
+#include "util/u_suballoc.h"
#include "ac_debug.h"
return prim_conv[mode];
}
-static unsigned si_conv_prim_to_gs_out(unsigned mode)
-{
- static const int prim_conv[] = {
- [PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
- [PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
- [PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
- [PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
- [PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
- [PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
- [PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
- [PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
- [SI_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
- };
- assert(mode < ARRAY_SIZE(prim_conv));
-
- return prim_conv[mode];
-}
-
/**
* This calculates the LDS size for tessellation shaders (VS, TCS, TES).
* LS.LDS_SIZE is shared by all 3 shader stages.
const struct pipe_draw_info *info,
unsigned *num_patches)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
struct si_shader *ls_current;
struct si_shader_selector *ls;
/* The TES pointer will only be used for sctx->last_tcs.
struct si_shader_selector *tcs =
sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
unsigned tess_uses_primid = sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id;
- bool has_primid_instancing_bug = sctx->chip_class == SI &&
+ bool has_primid_instancing_bug = sctx->chip_class == GFX6 &&
sctx->screen->info.max_se == 1;
unsigned tes_sh_base = sctx->shader_pointers.sh_base[PIPE_SHADER_TESS_EVAL];
unsigned num_tcs_input_cp = info->vertices_per_patch;
num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
}
- input_vertex_size = num_tcs_inputs * 16;
+ input_vertex_size = ls->lshs_vertex_stride;
output_vertex_size = num_tcs_outputs * 16;
input_patch_size = num_tcs_input_cp * input_vertex_size;
* resource usage. Also ensures that the number of tcs in and out
* vertices per threadgroup are at most 256.
*/
- *num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;
+ unsigned max_verts_per_patch = MAX2(num_tcs_input_cp, num_tcs_output_cp);
+ *num_patches = 256 / max_verts_per_patch;
/* Make sure that the data fits in LDS. This assumes the shaders only
* use LDS for the inputs and outputs.
*
- * While CIK can use 64K per threadgroup, there is a hang on Stoney
+ * While GFX7 can use 64K per threadgroup, there is a hang on Stoney
* with 2 CUs if we use more than 32K. The closed Vulkan driver also
* uses 32K at most on all GCN chips.
*/
(sctx->screen->tess_offchip_block_dw_size * 4) /
output_patch_size);
- /* Not necessary for correctness, but improves performance. The
- * specific value is taken from the proprietary driver.
+ /* Not necessary for correctness, but improves performance.
+ * The hardware can do more, but the radeonsi shader constant is
+ * limited to 6 bits.
+ */
+ *num_patches = MIN2(*num_patches, 63); /* triangles: 3 full waves except 3 lanes */
+
+ /* When distributed tessellation is unsupported, switch between SEs
+ * at a higher frequency to compensate for it.
+ */
+ if (!sctx->screen->has_distributed_tess && sctx->screen->info.max_se > 1)
+ *num_patches = MIN2(*num_patches, 16); /* recommended */
+
+ /* Make sure that vector lanes are reasonably occupied. It probably
+ * doesn't matter much because this is LS-HS, and TES is likely to
+ * occupy significantly more CUs.
*/
- *num_patches = MIN2(*num_patches, 40);
+ unsigned temp_verts_per_tg = *num_patches * max_verts_per_patch;
+ if (temp_verts_per_tg > 64 && temp_verts_per_tg % 64 < 48)
+ *num_patches = (temp_verts_per_tg & ~63) / max_verts_per_patch;
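+ /* Hypothetical example of the rounding above: with max_verts_per_patch = 4
+ * and *num_patches = 40, temp_verts_per_tg = 160; 160 % 64 = 32 < 48, so
+ * the patch count is rounded down to 128 / 4 = 32. */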
- if (sctx->chip_class == SI) {
- /* SI bug workaround, related to power management. Limit LS-HS
+ if (sctx->chip_class == GFX6) {
+ /* GFX6 bug workaround, related to power management. Limit LS-HS
* threadgroups to only one wave.
*/
- unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
+ unsigned one_wave = 64 / max_verts_per_patch;
*num_patches = MIN2(*num_patches, one_wave);
}
* The intended solution is to restrict threadgroups to
* a single instance by setting SWITCH_ON_EOI, which
* should cause IA to split instances up. However, this
- * doesn't work correctly on SI when there is no other
+ * doesn't work correctly on GFX6 when there is no other
* SE to switch to.
*/
if (has_primid_instancing_bug && tess_uses_primid)
assert(num_tcs_input_cp <= 32);
assert(num_tcs_output_cp <= 32);
- uint64_t ring_va = r600_resource(sctx->tess_rings)->gpu_address;
+ uint64_t ring_va = si_resource(sctx->tess_rings)->gpu_address;
assert((ring_va & u_bit_consecutive(0, 19)) == 0);
tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
/* Compute the LDS size. */
lds_size = output_patch0_offset + output_patch_size * *num_patches;
- if (sctx->chip_class >= CIK) {
+ if (sctx->chip_class >= GFX7) {
assert(lds_size <= 65536);
lds_size = align(lds_size, 512) / 512;
} else {
C_VS_STATE_LS_OUT_VERTEX_SIZE;
sctx->current_vs_state |= tcs_in_layout;
+ /* We should be able to support in-shader LDS use with LLVM >= 9
+ * by just adding the lds_sizes together, but it has never
+ * been tested. */
+ assert(ls_current->config.lds_size == 0);
+
if (sctx->chip_class >= GFX9) {
unsigned hs_rsrc2 = ls_current->config.rsrc2 |
- S_00B42C_LDS_SIZE(lds_size);
+ S_00B42C_LDS_SIZE_GFX9(lds_size);
radeon_set_sh_reg(cs, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);
/* Due to a hw bug, RSRC2_LS must be written twice with another
* LS register written in between. */
- if (sctx->chip_class == CIK && sctx->family != CHIP_HAWAII)
+ if (sctx->chip_class == GFX7 && sctx->family != CHIP_HAWAII)
radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
radeon_emit(cs, ls_current->config.rsrc1);
S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
- if (sctx->chip_class >= CIK)
- radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
- ls_hs_config);
- else
- radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
- ls_hs_config);
+ if (sctx->last_ls_hs_config != ls_hs_config) {
+ if (sctx->chip_class >= GFX7) {
+ radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
+ ls_hs_config);
+ } else {
+ radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
+ ls_hs_config);
+ }
+ sctx->last_ls_hs_config = ls_hs_config;
+ sctx->context_roll = true;
+ }
}
-static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
+static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info,
+ enum pipe_prim_type prim)
{
- switch (info->mode) {
+ switch (prim) {
case PIPE_PRIM_PATCHES:
return info->count / info->vertices_per_patch;
+ case PIPE_PRIM_POLYGON:
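+ /* Counted as a single primitive when there are at least 3 vertices. */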
+ return info->count >= 3;
case SI_PRIM_RECTANGLE_LIST:
return info->count / 3;
default:
- return u_prims_for_vertices(info->mode, info->count);
+ return u_decomposed_prims_for_vertices(prim, info->count);
}
}
key->u.uses_gs)
partial_vs_wave = true;
- /* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
+ /* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= GFX8) */
if (sscreen->has_distributed_tess) {
if (key->u.uses_gs) {
- if (sscreen->info.chip_class <= VI)
+ if (sscreen->info.chip_class == GFX8)
partial_es_wave = true;
-
- /* GPU hang workaround. */
- if (sscreen->info.family == CHIP_TONGA ||
- sscreen->info.family == CHIP_FIJI ||
- sscreen->info.family == CHIP_POLARIS10 ||
- sscreen->info.family == CHIP_POLARIS11 ||
- sscreen->info.family == CHIP_POLARIS12)
- partial_vs_wave = true;
} else {
partial_vs_wave = true;
}
wd_switch_on_eop = true;
}
- if (sscreen->info.chip_class >= CIK) {
+ if (sscreen->info.chip_class >= GFX7) {
/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
* 4 shader engines. Set 1 to pass the assertion below.
* The other cases are hardware requirements.
* Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
* for points, line strips, and tri strips.
*/
- if (sscreen->info.max_se < 4 ||
+ if (sscreen->info.max_se <= 2 ||
key->u.prim == PIPE_PRIM_POLYGON ||
key->u.prim == PIPE_PRIM_LINE_LOOP ||
key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
* Assume indirect draws always use small instances.
* This is needed for good VS wave utilization.
*/
- if (sscreen->info.chip_class <= VI &&
+ if (sscreen->info.chip_class <= GFX8 &&
sscreen->info.max_se == 4 &&
key->u.multi_instances_smaller_than_primgroup)
wd_switch_on_eop = true;
- /* Required on CIK and later. */
- if (sscreen->info.max_se > 2 && !wd_switch_on_eop)
+ /* Required on GFX7 and later. */
+ if (sscreen->info.max_se == 4 && !wd_switch_on_eop)
ia_switch_on_eoi = true;
- /* Required by Hawaii and, for some special cases, by VI. */
+ /* HW engineers suggested that PARTIAL_VS_WAVE_ON should be set
+ * to work around a GS hang.
+ */
+ if (key->u.uses_gs &&
+ (sscreen->info.family == CHIP_TONGA ||
+ sscreen->info.family == CHIP_FIJI ||
+ sscreen->info.family == CHIP_POLARIS10 ||
+ sscreen->info.family == CHIP_POLARIS11 ||
+ sscreen->info.family == CHIP_POLARIS12 ||
+ sscreen->info.family == CHIP_VEGAM))
+ partial_vs_wave = true;
+
+ /* Required by Hawaii and, for some special cases, by GFX8. */
if (ia_switch_on_eoi &&
(sscreen->info.family == CHIP_HAWAII ||
- (sscreen->info.chip_class == VI &&
+ (sscreen->info.chip_class == GFX8 &&
(key->u.uses_gs || max_primgroup_in_wave != 2))))
partial_vs_wave = true;
key->u.uses_instancing)
partial_vs_wave = true;
+ /* This only applies to Polaris10 and later 4 SE chips.
+ * wd_switch_on_eop is already true on all other chips.
+ */
+ if (!wd_switch_on_eop && key->u.primitive_restart)
+ partial_vs_wave = true;
+
/* If the WD switch is false, the IA switch must be false too. */
assert(wd_switch_on_eop || !ia_switch_on_eop);
}
/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
- if (sscreen->info.chip_class <= VI && ia_switch_on_eoi)
+ if (sscreen->info.chip_class <= GFX8 && ia_switch_on_eoi)
partial_es_wave = true;
return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
- S_028AA8_WD_SWITCH_ON_EOP(sscreen->info.chip_class >= CIK ? wd_switch_on_eop : 0) |
+ S_028AA8_WD_SWITCH_ON_EOP(sscreen->info.chip_class >= GFX7 ? wd_switch_on_eop : 0) |
/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
- S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->info.chip_class == VI ?
+ S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->info.chip_class == GFX8 ?
max_primgroup_in_wave : 0) |
S_030960_EN_INST_OPT_BASIC(sscreen->info.chip_class >= GFX9) |
S_030960_EN_INST_OPT_ADV(sscreen->info.chip_class >= GFX9);
}
-void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
+static void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
{
for (int prim = 0; prim <= SI_PRIM_RECTANGLE_LIST; prim++)
for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
const struct pipe_draw_info *info,
- unsigned num_patches)
+ enum pipe_prim_type prim,
+ unsigned num_patches,
+ unsigned instance_count,
+ bool primitive_restart)
{
union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
unsigned primgroup_size;
primgroup_size = 128; /* recommended without a GS and tess */
}
- key.u.prim = info->mode;
- key.u.uses_instancing = info->indirect || info->instance_count > 1;
+ key.u.prim = prim;
+ key.u.uses_instancing = info->indirect || instance_count > 1;
key.u.multi_instances_smaller_than_primgroup =
info->indirect ||
- (info->instance_count > 1 &&
+ (instance_count > 1 &&
(info->count_from_stream_output ||
- si_num_prims_for_vertices(info) < primgroup_size));
- key.u.primitive_restart = info->primitive_restart;
+ si_num_prims_for_vertices(info, prim) < primgroup_size));
+ key.u.primitive_restart = primitive_restart;
key.u.count_from_stream_output = info->count_from_stream_output != NULL;
ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index] |
if (sctx->gs_shader.cso) {
/* GS requirement. */
- if (sctx->chip_class <= VI &&
+ if (sctx->chip_class <= GFX8 &&
SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);
if (sctx->family == CHIP_HAWAII &&
G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
(info->indirect ||
- (info->instance_count > 1 &&
+ (instance_count > 1 &&
(info->count_from_stream_output ||
- si_num_prims_for_vertices(info) <= 1))))
+ si_num_prims_for_vertices(info, prim) <= 1))))
sctx->flags |= SI_CONTEXT_VGT_FLUSH;
}
/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
enum pipe_prim_type rast_prim = sctx->current_rast_prim;
- struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;
+ struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
/* Skip this if not rendering lines. */
- if (rast_prim != PIPE_PRIM_LINES &&
- rast_prim != PIPE_PRIM_LINE_LOOP &&
- rast_prim != PIPE_PRIM_LINE_STRIP &&
- rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
- rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
+ if (!util_prim_is_lines(rast_prim))
return;
if (rast_prim == sctx->last_rast_prim &&
sctx->last_rast_prim = rast_prim;
sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
+ sctx->context_roll = true;
}
static void si_emit_vs_state(struct si_context *sctx,
}
if (sctx->current_vs_state != sctx->last_vs_state) {
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
+ /* For the API vertex shader (VS_STATE_INDEXED). */
radeon_set_sh_reg(cs,
sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
SI_SGPR_VS_STATE_BITS * 4,
sctx->current_vs_state);
+ /* For vertex color clamping, which is done in the last stage
+ * before the rasterizer. */
+ if (sctx->gs_shader.cso || sctx->tes_shader.cso) {
+ /* GS copy shader or TES if GS is missing. */
+ radeon_set_sh_reg(cs,
+ R_00B130_SPI_SHADER_USER_DATA_VS_0 +
+ SI_SGPR_VS_STATE_BITS * 4,
+ sctx->current_vs_state);
+ }
+
sctx->last_vs_state = sctx->current_vs_state;
}
}
+static inline bool si_prim_restart_index_changed(struct si_context *sctx,
+ bool primitive_restart,
+ unsigned restart_index)
+{
+ return primitive_restart &&
+ (restart_index != sctx->last_restart_index ||
+ sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
+}
+
static void si_emit_draw_registers(struct si_context *sctx,
const struct pipe_draw_info *info,
- unsigned num_patches)
+ enum pipe_prim_type prim,
+ unsigned num_patches,
+ unsigned instance_count,
+ bool primitive_restart)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
- unsigned prim = si_conv_pipe_prim(info->mode);
- unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
+ unsigned vgt_prim = si_conv_pipe_prim(prim);
unsigned ia_multi_vgt_param;
- ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);
+ ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, prim, num_patches,
+ instance_count, primitive_restart);
/* Draw state. */
if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
if (sctx->chip_class >= GFX9)
- radeon_set_uconfig_reg_idx(cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param);
- else if (sctx->chip_class >= CIK)
+ radeon_set_uconfig_reg_idx(cs, sctx->screen,
+ R_030960_IA_MULTI_VGT_PARAM, 4,
+ ia_multi_vgt_param);
+ else if (sctx->chip_class >= GFX7)
radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
else
radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
sctx->last_multi_vgt_param = ia_multi_vgt_param;
}
- if (prim != sctx->last_prim) {
- if (sctx->chip_class >= CIK)
- radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
+ if (vgt_prim != sctx->last_prim) {
+ if (sctx->chip_class >= GFX7)
+ radeon_set_uconfig_reg_idx(cs, sctx->screen,
+ R_030908_VGT_PRIMITIVE_TYPE, 1, vgt_prim);
else
- radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
+ radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, vgt_prim);
- sctx->last_prim = prim;
- }
-
- if (gs_out_prim != sctx->last_gs_out_prim) {
- radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
- sctx->last_gs_out_prim = gs_out_prim;
+ sctx->last_prim = vgt_prim;
}
/* Primitive restart. */
- if (info->primitive_restart != sctx->last_primitive_restart_en) {
+ if (primitive_restart != sctx->last_primitive_restart_en) {
if (sctx->chip_class >= GFX9)
radeon_set_uconfig_reg(cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
- info->primitive_restart);
+ primitive_restart);
else
radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
- info->primitive_restart);
+ primitive_restart);
- sctx->last_primitive_restart_en = info->primitive_restart;
+ sctx->last_primitive_restart_en = primitive_restart;
}
- if (info->primitive_restart &&
- (info->restart_index != sctx->last_restart_index ||
- sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
+ if (si_prim_restart_index_changed(sctx, primitive_restart, info->restart_index)) {
radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
info->restart_index);
sctx->last_restart_index = info->restart_index;
+ sctx->context_roll = true;
}
}
const struct pipe_draw_info *info,
struct pipe_resource *indexbuf,
unsigned index_size,
- unsigned index_offset)
+ unsigned index_offset,
+ unsigned instance_count,
+ bool dispatch_prim_discard_cs,
+ unsigned original_index_size)
{
struct pipe_draw_indirect_info *indirect = info->indirect;
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
uint32_t index_max_size = 0;
if (info->count_from_stream_output) {
struct si_streamout_target *t =
(struct si_streamout_target*)info->count_from_stream_output;
- uint64_t va = t->buf_filled_size->gpu_address +
- t->buf_filled_size_offset;
radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
t->stride_in_dw);
-
- radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG) |
- COPY_DATA_WR_CONFIRM);
- radeon_emit(cs, va); /* src address lo */
- radeon_emit(cs, va >> 32); /* src address hi */
- radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
- radeon_emit(cs, 0); /* unused */
-
- radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- t->buf_filled_size, RADEON_USAGE_READ,
- RADEON_PRIO_SO_FILLED_SIZE);
+ si_cp_copy_data(sctx, sctx->gfx_cs,
+ COPY_DATA_REG, NULL,
+ R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2,
+ COPY_DATA_SRC_MEM, t->buf_filled_size,
+ t->buf_filled_size_offset);
}
/* draw packet */
break;
case 2:
index_type = V_028A7C_VGT_INDEX_16 |
- (SI_BIG_ENDIAN && sctx->chip_class <= CIK ?
+ (SI_BIG_ENDIAN && sctx->chip_class <= GFX7 ?
V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
break;
case 4:
index_type = V_028A7C_VGT_INDEX_32 |
- (SI_BIG_ENDIAN && sctx->chip_class <= CIK ?
+ (SI_BIG_ENDIAN && sctx->chip_class <= GFX7 ?
V_028A7C_VGT_DMA_SWAP_32_BIT : 0);
break;
default:
}
if (sctx->chip_class >= GFX9) {
- radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
- 2, index_type);
+ radeon_set_uconfig_reg_idx(cs, sctx->screen,
+ R_03090C_VGT_INDEX_TYPE, 2,
+ index_type);
} else {
radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
radeon_emit(cs, index_type);
sctx->last_index_size = index_size;
}
- index_max_size = (indexbuf->width0 - index_offset) /
- index_size;
- index_va = r600_resource(indexbuf)->gpu_address + index_offset;
+ if (original_index_size) {
+ index_max_size = (indexbuf->width0 - index_offset) /
+ original_index_size;
+ index_va = si_resource(indexbuf)->gpu_address + index_offset;
- radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource *)indexbuf,
- RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
+ si_resource(indexbuf),
+ RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
+ }
} else {
- /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
+ /* On GFX7 and later, non-indexed draws overwrite VGT_INDEX_TYPE,
* so the state must be re-emitted before the next indexed draw.
*/
- if (sctx->chip_class >= CIK)
+ if (sctx->chip_class >= GFX7)
sctx->last_index_size = -1;
}
if (indirect) {
- uint64_t indirect_va = r600_resource(indirect->buffer)->gpu_address;
+ uint64_t indirect_va = si_resource(indirect->buffer)->gpu_address;
assert(indirect_va % 8 == 0);
radeon_emit(cs, indirect_va >> 32);
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource *)indirect->buffer,
+ si_resource(indirect->buffer),
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA
uint64_t count_va = 0;
if (indirect->indirect_draw_count) {
- struct r600_resource *params_buf =
- (struct r600_resource *)indirect->indirect_draw_count;
+ struct si_resource *params_buf =
+ si_resource(indirect->indirect_draw_count);
radeon_add_to_buffer_list(
sctx, sctx->gfx_cs, params_buf,
} else {
int base_vertex;
- radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
- radeon_emit(cs, info->instance_count);
+ if (sctx->last_instance_count == SI_INSTANCE_COUNT_UNKNOWN ||
+ sctx->last_instance_count != instance_count) {
+ radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
+ radeon_emit(cs, instance_count);
+ sctx->last_instance_count = instance_count;
+ }
/* Base vertex and start instance. */
- base_vertex = index_size ? info->index_bias : info->start;
+ base_vertex = original_index_size ? info->index_bias : info->start;
if (sctx->num_vs_blit_sgprs) {
/* Re-emit draw constants after we leave u_blitter. */
}
if (index_size) {
+ if (dispatch_prim_discard_cs) {
+ index_va += info->start * original_index_size;
+ index_max_size = MIN2(index_max_size, info->count);
+
+ si_dispatch_prim_discard_cs_and_draw(sctx, info,
+ original_index_size,
+ base_vertex,
+ index_va, index_max_size);
+ return;
+ }
+
index_va += info->start * index_size;
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
}
}
-static void si_emit_surface_sync(struct si_context *sctx,
- unsigned cp_coher_cntl)
+void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs,
+ unsigned cp_coher_cntl)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ bool compute_ib = !sctx->has_graphics ||
+ cs == sctx->prim_discard_compute_cs;
- if (sctx->chip_class >= GFX9) {
+ if (sctx->chip_class >= GFX9 || compute_ib) {
/* Flush caches and wait for the caches to assert idle. */
radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
radeon_emit(cs, 0); /* CP_COHER_BASE */
radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
}
+
+ /* ACQUIRE_MEM has an implicit context roll if the current context
+ * is busy. */
+ if (!compute_ib)
+ sctx->context_roll = true;
+}
+
+void si_prim_discard_signal_next_compute_ib_start(struct si_context *sctx)
+{
+ if (!si_compute_prim_discard_enabled(sctx))
+ return;
+
+ if (!sctx->barrier_buf) {
+ u_suballocator_alloc(sctx->allocator_zeroed_memory, 4, 4,
+ &sctx->barrier_buf_offset,
+ (struct pipe_resource**)&sctx->barrier_buf);
+ }
+
+ /* Emit a placeholder to signal the next compute IB to start.
+ * See si_compute_prim_discard.c for explanation.
+ */
+ uint32_t signal = 1;
+ si_cp_write_data(sctx, sctx->barrier_buf, sctx->barrier_buf_offset,
+ 4, V_370_MEM, V_370_ME, &signal);
+
+ sctx->last_pkt3_write_data =
+ &sctx->gfx_cs->current.buf[sctx->gfx_cs->current.cdw - 5];
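+ /* si_cp_write_data emits a 5-dword WRITE_DATA packet (header, control,
+ * address lo/hi, one data dword), so cdw - 5 points at the packet header. */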
+
+ /* Only the last occurrence of WRITE_DATA will be executed.
+ * The packet will be enabled in si_flush_gfx_cs.
+ */
+ *sctx->last_pkt3_write_data = PKT3(PKT3_NOP, 3, 0);
+}
+
+void gfx10_emit_cache_flush(struct si_context *ctx)
+{
+ struct radeon_cmdbuf *cs = ctx->gfx_cs;
+ uint32_t gcr_cntl = 0;
+ unsigned cb_db_event = 0;
+ unsigned flags = ctx->flags;
+
+ if (!ctx->has_graphics) {
+ /* Only process compute flags. */
+ flags &= SI_CONTEXT_INV_ICACHE |
+ SI_CONTEXT_INV_SCACHE |
+ SI_CONTEXT_INV_VCACHE |
+ SI_CONTEXT_INV_L2 |
+ SI_CONTEXT_WB_L2 |
+ SI_CONTEXT_INV_L2_METADATA |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
+ }
+
+ /* We don't need these. */
+ assert(!(flags & (SI_CONTEXT_VGT_FLUSH |
+ SI_CONTEXT_VGT_STREAMOUT_SYNC |
+ SI_CONTEXT_FLUSH_AND_INV_DB_META)));
+
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
+ ctx->num_cb_cache_flushes++;
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
+ ctx->num_db_cache_flushes++;
+
+ if (flags & SI_CONTEXT_INV_ICACHE)
+ gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
+ if (flags & SI_CONTEXT_INV_SCACHE) {
+ /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
+ * to FORWARD when both L1 and L2 are written out (WB or INV).
+ */
+ gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
+ }
+ if (flags & SI_CONTEXT_INV_VCACHE)
+ gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);
+ if (flags & SI_CONTEXT_INV_L2) {
+ /* Writeback and invalidate everything in L2. */
+ gcr_cntl |= S_586_GL2_INV(1) | S_586_GLM_INV(1);
+ ctx->num_L2_invalidates++;
+ } else if (flags & SI_CONTEXT_WB_L2) {
+ /* Writeback but do not invalidate. */
+ gcr_cntl |= S_586_GL2_WB(1);
+ }
+ if (flags & SI_CONTEXT_INV_L2_METADATA)
+ gcr_cntl |= S_586_GLM_INV(1);
+
+ if (flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
+ /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) |
+ EVENT_INDEX(0));
+ }
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
+ /* Flush HTILE. Will wait for idle later. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) |
+ EVENT_INDEX(0));
+ }
+
+ /* First flush CB/DB, then L1/L2. */
+ gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);
+
+ if ((flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) ==
+ (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
+ cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
+ } else if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
+ cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
+ } else if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
+ cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
+ } else {
+ assert(0);
+ }
+ } else {
+ /* Wait for graphics shaders to go idle if requested. */
+ if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ /* Only count explicit shader flushes, not implicit ones. */
+ ctx->num_vs_flushes++;
+ ctx->num_ps_flushes++;
+ } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ ctx->num_vs_flushes++;
+ }
+ }
+
+ if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && ctx->compute_is_busy) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ ctx->num_cs_flushes++;
+ ctx->compute_is_busy = false;
+ }
+
+ if (cb_db_event) {
+ /* CB/DB flush and invalidate (or possibly just a wait for a
+ * meta flush) via RELEASE_MEM.
+ *
+ * Combine this with other cache flushes when possible; this
+ * requires affected shaders to be idle, so do it after the
+ * CS_PARTIAL_FLUSH emitted above (VS/PS partial flushes are
+ * always implied).
+ */
+ uint64_t va;
+
+ /* Do the flush (enqueue the event and wait for it). */
+ va = ctx->wait_mem_scratch->gpu_address;
+ ctx->wait_mem_number++;
+
+ /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
+ unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
+ unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
+ unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
+ unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
+ assert(G_586_GL2_US(gcr_cntl) == 0);
+ assert(G_586_GL2_RANGE(gcr_cntl) == 0);
+ assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
+ unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
+ unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
+ unsigned gcr_seq = G_586_SEQ(gcr_cntl);
+
+ gcr_cntl &= C_586_GLM_WB &
+ C_586_GLM_INV &
+ C_586_GLV_INV &
+ C_586_GL1_INV &
+ C_586_GL2_INV &
+ C_586_GL2_WB; /* keep SEQ */
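+ /* The fields extracted above are re-encoded into RELEASE_MEM below;
+ * any bits left in gcr_cntl are flushed via ACQUIRE_MEM further down. */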
+
+ si_cp_release_mem(ctx, cs, cb_db_event,
+ S_490_GLM_WB(glm_wb) |
+ S_490_GLM_INV(glm_inv) |
+ S_490_GLV_INV(glv_inv) |
+ S_490_GL1_INV(gl1_inv) |
+ S_490_GL2_INV(gl2_inv) |
+ S_490_GL2_WB(gl2_wb) |
+ S_490_SEQ(gcr_seq),
+ EOP_DST_SEL_MEM,
+ EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
+ EOP_DATA_SEL_VALUE_32BIT,
+ ctx->wait_mem_scratch, va,
+ ctx->wait_mem_number, SI_NOT_QUERY);
+ si_cp_wait_mem(ctx, ctx->gfx_cs, va, ctx->wait_mem_number, 0xffffffff,
+ WAIT_REG_MEM_EQUAL);
+ }
+
+ /* Ignore fields that only modify the behavior of other fields. */
+ if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
+ /* Flush caches and wait for the caches to assert idle.
+ * The cache flush is executed in the ME, but the PFP waits
+ * for completion.
+ */
+ radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
+ radeon_emit(cs, 0); /* CP_COHER_CNTL */
+ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
+ radeon_emit(cs, 0xffffff); /* CP_COHER_SIZE_HI */
+ radeon_emit(cs, 0); /* CP_COHER_BASE */
+ radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
+ radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
+ radeon_emit(cs, gcr_cntl); /* GCR_CNTL */
+ } else if (cb_db_event ||
+ (flags & (SI_CONTEXT_VS_PARTIAL_FLUSH |
+ SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH))) {
+ /* We need to ensure that PFP waits as well. */
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
+ }
+
+ if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
+ EVENT_INDEX(0));
+ } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
+ EVENT_INDEX(0));
+ }
+
+ ctx->flags = 0;
}
void si_emit_cache_flush(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
uint32_t flags = sctx->flags;
+
+ if (!sctx->has_graphics) {
+ /* Only process compute flags. */
+ flags &= SI_CONTEXT_INV_ICACHE |
+ SI_CONTEXT_INV_SCACHE |
+ SI_CONTEXT_INV_VCACHE |
+ SI_CONTEXT_INV_L2 |
+ SI_CONTEXT_WB_L2 |
+ SI_CONTEXT_INV_L2_METADATA |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
+ }
+
uint32_t cp_coher_cntl = 0;
- uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
- SI_CONTEXT_FLUSH_AND_INV_DB);
+ const uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
+ SI_CONTEXT_FLUSH_AND_INV_DB);
+ const bool is_barrier = flush_cb_db ||
+ /* INV_ICACHE == beginning of gfx IB. Checking
+ * INV_ICACHE fixes corruption for DeusExMD with
+ * compute-based culling, but I don't know why.
+ */
+ flags & (SI_CONTEXT_INV_ICACHE |
+ SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_VS_PARTIAL_FLUSH) ||
+ (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
+ sctx->compute_is_busy);
+
+ assert(sctx->chip_class <= GFX9);
if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
sctx->num_cb_cache_flushes++;
if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
sctx->num_db_cache_flushes++;
- /* SI has a bug that it always flushes ICACHE and KCACHE if either
+ /* GFX6 has a bug that it always flushes ICACHE and KCACHE if either
* bit is set. An alternative way is to write SQC_CACHES, but that
* doesn't seem to work reliably. Since the bug doesn't affect
* correctness (it only does more work than necessary) and
if (flags & SI_CONTEXT_INV_ICACHE)
cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
- if (flags & SI_CONTEXT_INV_SMEM_L1)
+ if (flags & SI_CONTEXT_INV_SCACHE)
cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
- if (sctx->chip_class <= VI) {
+ if (sctx->chip_class <= GFX8) {
if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
S_0085F0_CB0_DEST_BASE_ENA(1) |
S_0085F0_CB7_DEST_BASE_ENA(1);
/* Necessary for DCC */
- if (sctx->chip_class == VI)
- si_gfx_write_event_eop(sctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
- 0, EOP_DATA_SEL_DISCARD, NULL,
- 0, 0, SI_NOT_QUERY);
+ if (sctx->chip_class == GFX8)
+ si_cp_release_mem(sctx, cs,
+ V_028A90_FLUSH_AND_INV_CB_DATA_TS,
+ 0, EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
+ EOP_DATA_SEL_DISCARD, NULL,
+ 0, 0, SI_NOT_QUERY);
}
if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
sctx->compute_is_busy) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH | EVENT_INDEX(4)));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
sctx->num_cs_flushes++;
sctx->compute_is_busy = false;
}
}
/* Ideally flush TC together with CB/DB. */
- if (flags & SI_CONTEXT_INV_GLOBAL_L2) {
+ if (flags & SI_CONTEXT_INV_L2) {
/* Writeback and invalidate everything in L2 & L1. */
tc_flags = EVENT_TC_ACTION_ENA |
EVENT_TC_WB_ACTION_ENA;
/* Clear the flags. */
- flags &= ~(SI_CONTEXT_INV_GLOBAL_L2 |
- SI_CONTEXT_WRITEBACK_GLOBAL_L2 |
- SI_CONTEXT_INV_VMEM_L1);
+ flags &= ~(SI_CONTEXT_INV_L2 |
+ SI_CONTEXT_WB_L2 |
+ SI_CONTEXT_INV_VCACHE);
sctx->num_L2_invalidates++;
}
va = sctx->wait_mem_scratch->gpu_address;
sctx->wait_mem_number++;
- si_gfx_write_event_eop(sctx, cb_db_event, tc_flags,
- EOP_DATA_SEL_VALUE_32BIT,
- sctx->wait_mem_scratch, va,
- sctx->wait_mem_number, SI_NOT_QUERY);
- si_gfx_wait_fence(sctx, va, sctx->wait_mem_number, 0xffffffff);
+ si_cp_release_mem(sctx, cs, cb_db_event, tc_flags,
+ EOP_DST_SEL_MEM,
+ EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
+ EOP_DATA_SEL_VALUE_32BIT,
+ sctx->wait_mem_scratch, va,
+ sctx->wait_mem_number, SI_NOT_QUERY);
+ si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff,
+ WAIT_REG_MEM_EQUAL);
}
/* Make sure ME is idle (it executes most packets) before continuing.
* This prevents read-after-write hazards between PFP and ME.
*/
- if (cp_coher_cntl ||
- (flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
- SI_CONTEXT_INV_VMEM_L1 |
- SI_CONTEXT_INV_GLOBAL_L2 |
- SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
+ if (sctx->has_graphics &&
+ (cp_coher_cntl ||
+ (flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
+ SI_CONTEXT_INV_VCACHE |
+ SI_CONTEXT_INV_L2 |
+ SI_CONTEXT_WB_L2)))) {
radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
radeon_emit(cs, 0);
}
- /* SI-CI-VI only:
+ /* GFX6-GFX8 only:
* When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
* waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
*
* cp_coher_cntl should contain all necessary flags except TC flags
* at this point.
*
- * SI-CIK don't support L2 write-back.
+ * GFX6-GFX7 don't support L2 write-back.
*/
- if (flags & SI_CONTEXT_INV_GLOBAL_L2 ||
- (sctx->chip_class <= CIK &&
- (flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
- /* Invalidate L1 & L2. (L1 is always invalidated on SI)
- * WB must be set on VI+ when TC_ACTION is set.
+ if (flags & SI_CONTEXT_INV_L2 ||
+ (sctx->chip_class <= GFX7 &&
+ (flags & SI_CONTEXT_WB_L2))) {
+ /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
+ * WB must be set on GFX8+ when TC_ACTION is set.
*/
- si_emit_surface_sync(sctx, cp_coher_cntl |
+ si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
S_0085F0_TC_ACTION_ENA(1) |
S_0085F0_TCL1_ACTION_ENA(1) |
- S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= VI));
+ S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8));
cp_coher_cntl = 0;
sctx->num_L2_invalidates++;
} else {
/* L1 invalidation and L2 writeback must be done separately,
* because both operations can't be done together.
*/
- if (flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2) {
+ if (flags & SI_CONTEXT_WB_L2) {
/* WB = write-back
* NC = apply to non-coherent MTYPEs
* (i.e. MTYPE <= 1, which is what we use everywhere)
*
* WB doesn't work without NC.
*/
- si_emit_surface_sync(sctx, cp_coher_cntl |
+ si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
S_0301F0_TC_WB_ACTION_ENA(1) |
S_0301F0_TC_NC_ACTION_ENA(1));
cp_coher_cntl = 0;
sctx->num_L2_writebacks++;
}
- if (flags & SI_CONTEXT_INV_VMEM_L1) {
+ if (flags & SI_CONTEXT_INV_VCACHE) {
/* Invalidate per-CU VMEM L1. */
- si_emit_surface_sync(sctx, cp_coher_cntl |
+ si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl |
S_0085F0_TCL1_ACTION_ENA(1));
cp_coher_cntl = 0;
}
/* If TC flushes haven't cleared this... */
if (cp_coher_cntl)
- si_emit_surface_sync(sctx, cp_coher_cntl);
+ si_emit_surface_sync(sctx, sctx->gfx_cs, cp_coher_cntl);
+
+ if (is_barrier)
+ si_prim_discard_signal_next_compute_ib_start(sctx);
if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
}
static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
- unsigned skip_atom_mask)
+ enum pipe_prim_type prim, unsigned instance_count,
+ bool primitive_restart, unsigned skip_atom_mask)
{
+ unsigned num_patches = 0;
+
+ si_emit_rasterizer_prim_state(sctx);
+ if (sctx->tes_shader.cso)
+ si_emit_derived_tess_state(sctx, info, &num_patches);
+
/* Emit state atoms. */
unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
- while (mask) {
- struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];
+ while (mask)
+ sctx->atoms.array[u_bit_scan(&mask)].emit(sctx);
- atom->emit(sctx, atom);
- }
sctx->dirty_atoms &= skip_atom_mask;
/* Emit states. */
sctx->dirty_states = 0;
/* Emit draw states. */
- unsigned num_patches = 0;
-
- si_emit_rasterizer_prim_state(sctx);
- if (sctx->tes_shader.cso)
- si_emit_derived_tess_state(sctx, info, &num_patches);
si_emit_vs_state(sctx, info);
- si_emit_draw_registers(sctx, info, num_patches);
+ si_emit_draw_registers(sctx, info, prim, num_patches, instance_count,
+ primitive_restart);
+}
+
+static bool
+si_all_vs_resources_read_only(struct si_context *sctx,
+ struct pipe_resource *indexbuf)
+{
+ struct radeon_winsys *ws = sctx->ws;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
+
+ /* Index buffer. */
+ if (indexbuf &&
+ ws->cs_is_buffer_referenced(cs, si_resource(indexbuf)->buf,
+ RADEON_USAGE_WRITE))
+ goto has_write_reference;
+
+ /* Vertex buffers. */
+ struct si_vertex_elements *velems = sctx->vertex_elements;
+ unsigned num_velems = velems->count;
+
+ for (unsigned i = 0; i < num_velems; i++) {
+ if (!((1 << i) & velems->first_vb_use_mask))
+ continue;
+
+ unsigned vb_index = velems->vertex_buffer_index[i];
+ struct pipe_resource *res = sctx->vertex_buffer[vb_index].buffer.resource;
+ if (!res)
+ continue;
+
+ if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
+ RADEON_USAGE_WRITE))
+ goto has_write_reference;
+ }
+
+ /* Constant and shader buffers. */
+ struct si_descriptors *buffers =
+ &sctx->descriptors[si_const_and_shader_buffer_descriptors_idx(PIPE_SHADER_VERTEX)];
+ for (unsigned i = 0; i < buffers->num_active_slots; i++) {
+ unsigned index = buffers->first_active_slot + i;
+ struct pipe_resource *res =
+ sctx->const_and_shader_buffers[PIPE_SHADER_VERTEX].buffers[index];
+ if (!res)
+ continue;
+
+ if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
+ RADEON_USAGE_WRITE))
+ goto has_write_reference;
+ }
+
+ /* Samplers. */
+ struct si_shader_selector *vs = sctx->vs_shader.cso;
+ if (vs->info.samplers_declared) {
+ unsigned num_samplers = util_last_bit(vs->info.samplers_declared);
+
+ for (unsigned i = 0; i < num_samplers; i++) {
+ struct pipe_sampler_view *view = sctx->samplers[PIPE_SHADER_VERTEX].views[i];
+ if (!view)
+ continue;
+
+ if (ws->cs_is_buffer_referenced(cs,
+ si_resource(view->texture)->buf,
+ RADEON_USAGE_WRITE))
+ goto has_write_reference;
+ }
+ }
+
+ /* Images. */
+ if (vs->info.images_declared) {
+ unsigned num_images = util_last_bit(vs->info.images_declared);
+
+ for (unsigned i = 0; i < num_images; i++) {
+ struct pipe_resource *res = sctx->images[PIPE_SHADER_VERTEX].views[i].resource;
+ if (!res)
+ continue;
+
+ if (ws->cs_is_buffer_referenced(cs, si_resource(res)->buf,
+ RADEON_USAGE_WRITE))
+ goto has_write_reference;
+ }
+ }
+
+ return true;
+
+has_write_reference:
+ /* If the current gfx IB has enough packets, flush it to remove write
+ * references to buffers.
+ */
+ if (cs->prev_dw + cs->current.cdw > 2048) {
+ si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
+ assert(si_all_vs_resources_read_only(sctx, indexbuf));
+ return true;
+ }
+ return false;
}
-void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
+static ALWAYS_INLINE bool pd_msg(const char *s)
+{
+ if (SI_PRIM_DISCARD_DEBUG)
+ printf("PD failed: %s\n", s);
+ return false;
+}
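+
+/* pd_msg() is chained with && in the big condition in si_draw_vbo() below:
+ * each check either passes, or logs why compute-based primitive discard was
+ * skipped (when SI_PRIM_DISCARD_DEBUG is set) and short-circuits to false. */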
+
+static void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
struct pipe_resource *indexbuf = info->index.resource;
- unsigned dirty_tex_counter;
- enum pipe_prim_type rast_prim;
+ unsigned dirty_tex_counter, dirty_buf_counter;
+ enum pipe_prim_type rast_prim, prim = info->mode;
unsigned index_size = info->index_size;
unsigned index_offset = info->indirect ? info->start * index_size : 0;
+ unsigned instance_count = info->instance_count;
+ bool primitive_restart = info->primitive_restart &&
+ (!sctx->screen->options.prim_restart_tri_strips_only ||
+ (prim != PIPE_PRIM_TRIANGLE_STRIP &&
+ prim != PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY));
if (likely(!info->indirect)) {
- /* SI-CI treat instance_count==0 as instance_count==1. There is
+ /* GFX6-GFX7 treat instance_count==0 as instance_count==1. There is
* no workaround for indirect draws, but we can at least skip
* direct draws.
*/
- if (unlikely(!info->instance_count))
+ if (unlikely(!instance_count))
return;
/* Handle count == 0. */
return;
}
- if (unlikely(!sctx->vs_shader.cso)) {
- assert(0);
- return;
- }
- if (unlikely(!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard))) {
- assert(0);
- return;
- }
- if (unlikely(!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES))) {
+ if (unlikely(!sctx->vs_shader.cso ||
+ !rs ||
+ (!sctx->ps_shader.cso && !rs->rasterizer_discard) ||
+ (!!sctx->tes_shader.cso != (prim == PIPE_PRIM_PATCHES)))) {
assert(0);
return;
}
sctx->framebuffer.dirty_cbufs |=
((1 << sctx->framebuffer.state.nr_cbufs) - 1);
sctx->framebuffer.dirty_zsbuf = true;
- si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
si_update_all_texture_descriptors(sctx);
}
+ dirty_buf_counter = p_atomic_read(&sctx->screen->dirty_buf_counter);
+ if (unlikely(dirty_buf_counter != sctx->last_dirty_buf_counter)) {
+ sctx->last_dirty_buf_counter = dirty_buf_counter;
+ /* Rebind all buffers unconditionally. */
+ si_rebind_buffer(sctx, NULL);
+ }
+
si_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));
/* Set the rasterization primitive type.
else
rast_prim = sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
} else
- rast_prim = info->mode;
+ rast_prim = prim;
if (rast_prim != sctx->current_rast_prim) {
- bool old_is_poly = sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES;
- bool new_is_poly = rast_prim >= PIPE_PRIM_TRIANGLES;
- if (old_is_poly != new_is_poly) {
- sctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(sctx, &sctx->scissors.atom);
- }
+ if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
+ util_prim_is_points_or_lines(rast_prim))
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);
sctx->current_rast_prim = rast_prim;
sctx->do_update_shaders = true;
*/
bool gs_tri_strip_adj_fix =
!sctx->tes_shader.cso &&
- info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
- !info->primitive_restart;
+ prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
+ !primitive_restart;
if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
}
}
- if (sctx->do_update_shaders && !si_update_shaders(sctx))
- return;
-
if (index_size) {
/* Translate or upload, if needed. */
- /* 8-bit indices are supported on VI. */
- if (sctx->chip_class <= CIK && index_size == 1) {
+ /* 8-bit indices are supported on GFX8. */
+ if (sctx->chip_class <= GFX7 && index_size == 1) {
unsigned start, count, start_offset, size, offset;
void *ptr;
/* info->start will be added by the drawing code */
index_offset -= start_offset;
- } else if (sctx->chip_class <= CIK &&
- r600_resource(indexbuf)->TC_L2_dirty) {
- /* VI reads index buffers through TC L2, so it doesn't
+ } else if (sctx->chip_class <= GFX7 &&
+ si_resource(indexbuf)->TC_L2_dirty) {
+ /* GFX8 reads index buffers through TC L2, so it doesn't
* need this. */
- sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(indexbuf)->TC_L2_dirty = false;
+ sctx->flags |= SI_CONTEXT_WB_L2;
+ si_resource(indexbuf)->TC_L2_dirty = false;
}
}
+ bool dispatch_prim_discard_cs = false;
+ bool prim_discard_cs_instancing = false;
+ unsigned original_index_size = index_size;
+ unsigned direct_count = 0;
+
if (info->indirect) {
struct pipe_draw_indirect_info *indirect = info->indirect;
si_context_add_resource_size(sctx, indirect->buffer);
/* Indirect buffers use TC L2 on GFX9, but not older hw. */
- if (sctx->chip_class <= VI) {
- if (r600_resource(indirect->buffer)->TC_L2_dirty) {
- sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(indirect->buffer)->TC_L2_dirty = false;
+ if (sctx->chip_class <= GFX8) {
+ if (si_resource(indirect->buffer)->TC_L2_dirty) {
+ sctx->flags |= SI_CONTEXT_WB_L2;
+ si_resource(indirect->buffer)->TC_L2_dirty = false;
}
if (indirect->indirect_draw_count &&
- r600_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
- sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
+ si_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
+ sctx->flags |= SI_CONTEXT_WB_L2;
+ si_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
}
}
+ } else {
+ /* Multiply by 3 for strips and fans to get an approximate vertex
+ * count as triangles. */
+ direct_count = info->count * instance_count *
+ (prim == PIPE_PRIM_TRIANGLES ? 1 : 3);
+ }
+
+ /* Determine if we can use the primitive discard compute shader. */
+ if (si_compute_prim_discard_enabled(sctx) &&
+ (direct_count > sctx->prim_discard_vertex_count_threshold ?
+ (sctx->compute_num_verts_rejected += direct_count, true) : /* Add, then return true. */
+ (sctx->compute_num_verts_ineligible += direct_count, false)) && /* Add, then return false. */
+ (!info->count_from_stream_output || pd_msg("draw_opaque")) &&
+ (primitive_restart ?
+ /* Supported prim types with primitive restart: */
+ (prim == PIPE_PRIM_TRIANGLE_STRIP || pd_msg("bad prim type with primitive restart")) &&
+ /* Disallow instancing with primitive restart: */
+ (instance_count == 1 || pd_msg("instance_count > 1 with primitive restart")) :
+ /* Supported prim types without primitive restart + allow instancing: */
+ (1 << prim) & ((1 << PIPE_PRIM_TRIANGLES) |
+ (1 << PIPE_PRIM_TRIANGLE_STRIP) |
+ (1 << PIPE_PRIM_TRIANGLE_FAN)) &&
+ /* Instancing is limited to 16-bit indices, because InstanceID is packed into VertexID. */
+ /* TODO: DrawArraysInstanced sometimes doesn't work, so it's disabled. */
+ (instance_count == 1 ||
+ (instance_count <= USHRT_MAX && index_size && index_size <= 2) ||
+ pd_msg("instance_count too large or index_size == 4 or DrawArraysInstanced"))) &&
+ (info->drawid == 0 || !sctx->vs_shader.cso->info.uses_drawid || pd_msg("draw_id > 0")) &&
+ (!sctx->render_cond || pd_msg("render condition")) &&
+ /* Forced enablement ignores pipeline statistics queries. */
+ (sctx->screen->debug_flags & (DBG(PD) | DBG(ALWAYS_PD)) ||
+ (!sctx->num_pipeline_stat_queries && !sctx->streamout.prims_gen_query_enabled) ||
+ pd_msg("pipestat or primgen query")) &&
+ (!sctx->vertex_elements->instance_divisor_is_fetched || pd_msg("loads instance divisors")) &&
+ (!sctx->tes_shader.cso || pd_msg("uses tess")) &&
+ (!sctx->gs_shader.cso || pd_msg("uses GS")) &&
+ (!sctx->ps_shader.cso->info.uses_primid || pd_msg("PS uses PrimID")) &&
+#if SI_PRIM_DISCARD_DEBUG /* same as cso->prim_discard_cs_allowed */
+ (!sctx->vs_shader.cso->info.uses_bindless_images || pd_msg("uses bindless images")) &&
+ (!sctx->vs_shader.cso->info.uses_bindless_samplers || pd_msg("uses bindless samplers")) &&
+ (!sctx->vs_shader.cso->info.writes_memory || pd_msg("writes memory")) &&
+ (!sctx->vs_shader.cso->info.writes_viewport_index || pd_msg("writes viewport index")) &&
+ !sctx->vs_shader.cso->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION] &&
+ !sctx->vs_shader.cso->so.num_outputs &&
+#else
+ (sctx->vs_shader.cso->prim_discard_cs_allowed || pd_msg("VS shader uses unsupported features")) &&
+#endif
+ /* Check that all buffers are used for read only, because compute
+ * dispatches can run ahead. */
+ (si_all_vs_resources_read_only(sctx, index_size ? indexbuf : NULL) || pd_msg("write reference"))) {
+ switch (si_prepare_prim_discard_or_split_draw(sctx, info, primitive_restart)) {
+ case SI_PRIM_DISCARD_ENABLED:
+ original_index_size = index_size;
+ prim_discard_cs_instancing = instance_count > 1;
+ dispatch_prim_discard_cs = true;
+
+ /* The compute shader changes/lowers the following: */
+ prim = PIPE_PRIM_TRIANGLES;
+ index_size = 4;
+ instance_count = 1;
+ primitive_restart = false;
+ sctx->compute_num_verts_rejected -= direct_count;
+ sctx->compute_num_verts_accepted += direct_count;
+ break;
+ case SI_PRIM_DISCARD_DISABLED:
+ break;
+ case SI_PRIM_DISCARD_DRAW_SPLIT:
+ sctx->compute_num_verts_rejected -= direct_count;
+ goto return_cleanup;
+ }
+ }
+
+ if (prim_discard_cs_instancing != sctx->prim_discard_cs_instancing) {
+ sctx->prim_discard_cs_instancing = prim_discard_cs_instancing;
+ sctx->do_update_shaders = true;
}
+ if (sctx->do_update_shaders && !si_update_shaders(sctx))
+ goto return_cleanup;
+
si_need_gfx_cs_space(sctx);
+ if (sctx->bo_list_add_all_gfx_resources)
+ si_gfx_resources_add_all_to_bo_list(sctx);
+
/* Since we've called si_context_add_resource_size for vertex buffers,
* this must be called after si_need_cs_space, because we must let
* need_cs_space flush before we add buffers to the buffer list.
*/
if (!si_upload_vertex_buffer_descriptors(sctx))
- return;
+ goto return_cleanup;
- /* Vega10/Raven scissor bug workaround. This must be done before VPORT
- * scissor registers are changed. There is also a more efficient but
- * more involved alternative workaround.
+ /* Vega10/Raven scissor bug workaround. When any context register is
+ * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
+ * registers must be written too.
*/
- if ((sctx->family == CHIP_VEGA10 || sctx->family == CHIP_RAVEN) &&
- si_is_atom_dirty(sctx, &sctx->scissors.atom)) {
- sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
- si_emit_cache_flush(sctx);
+ bool has_gfx9_scissor_bug = sctx->screen->has_gfx9_scissor_bug;
+ unsigned masked_atoms = 0;
+
+ if (has_gfx9_scissor_bug) {
+ masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.scissors);
+
+ if (info->count_from_stream_output ||
+ sctx->dirty_atoms & si_atoms_that_always_roll_context() ||
+ sctx->dirty_states & si_states_that_always_roll_context())
+ sctx->context_roll = true;
}
/* Use optimal packet order based on whether we need to sync the pipeline. */
SI_CONTEXT_CS_PARTIAL_FLUSH))) {
/* If we have to wait for idle, set all states first, so that all
* SET packets are processed in parallel with previous draw calls.
- * Then upload descriptors, set shader pointers, and draw, and
- * prefetch at the end. This ensures that the time the CUs
- * are idle is very short. (there are only SET_SH packets between
- * the wait and the draw)
+ * Then draw and prefetch at the end. This ensures that the time
+ * the CUs are idle is very short.
*/
- struct r600_atom *shader_pointers = &sctx->shader_pointers.atom;
- unsigned masked_atoms = 1u << shader_pointers->id;
-
if (unlikely(sctx->flags & SI_CONTEXT_FLUSH_FOR_RENDER_COND))
- masked_atoms |= 1u << sctx->render_cond_atom.id;
+ masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.render_cond);
- /* Emit all states except shader pointers and render condition. */
- si_emit_all_states(sctx, info, masked_atoms);
- si_emit_cache_flush(sctx);
+ if (!si_upload_graphics_shader_descriptors(sctx))
+ goto return_cleanup;
+ /* Emit all states except possibly render condition. */
+ si_emit_all_states(sctx, info, prim, instance_count,
+ primitive_restart, masked_atoms);
+ sctx->emit_cache_flush(sctx);
/* <-- CUs are idle here. */
- if (!si_upload_graphics_shader_descriptors(sctx))
- return;
- /* Set shader pointers after descriptors are uploaded. */
- if (si_is_atom_dirty(sctx, shader_pointers))
- shader_pointers->emit(sctx, NULL);
- if (si_is_atom_dirty(sctx, &sctx->render_cond_atom))
- sctx->render_cond_atom.emit(sctx, NULL);
+ if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond))
+ sctx->atoms.s.render_cond.emit(sctx);
+
+ if (has_gfx9_scissor_bug &&
+ (sctx->context_roll ||
+ si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
+ sctx->atoms.s.scissors.emit(sctx);
+
sctx->dirty_atoms = 0;
- si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
+ si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset,
+ instance_count, dispatch_prim_discard_cs,
+ original_index_size);
/* <-- CUs are busy here. */
/* Start prefetches after the draw has been started. Both will run
* in parallel, but starting the draw first is more important.
*/
- if (sctx->chip_class >= CIK && sctx->prefetch_L2_mask)
- cik_emit_prefetch_L2(sctx);
+ if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
+ cik_emit_prefetch_L2(sctx, false);
} else {
/* If we don't wait for idle, start prefetches first, then set
* states, and draw at the end.
*/
if (sctx->flags)
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
- if (sctx->chip_class >= CIK && sctx->prefetch_L2_mask)
- cik_emit_prefetch_L2(sctx);
+ /* Only prefetch the API VS and VBO descriptors. */
+ if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
+ cik_emit_prefetch_L2(sctx, true);
if (!si_upload_graphics_shader_descriptors(sctx))
- return;
+ goto return_cleanup;
- si_emit_all_states(sctx, info, 0);
- si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
+ si_emit_all_states(sctx, info, prim, instance_count,
+ primitive_restart, masked_atoms);
+
+ if (has_gfx9_scissor_bug &&
+ (sctx->context_roll ||
+ si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)))
+ sctx->atoms.s.scissors.emit(sctx);
+
+ sctx->dirty_atoms = 0;
+
+ si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset,
+ instance_count, dispatch_prim_discard_cs,
+ original_index_size);
+
+ /* Prefetch the remaining shaders after the draw has been
+ * started. */
+ if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
+ cik_emit_prefetch_L2(sctx, false);
}
+ /* Clear the context roll flag after the draw call. */
+ sctx->context_roll = false;
+
if (unlikely(sctx->current_saved_cs)) {
si_trace_emit(sctx);
si_log_draw_state(sctx, sctx->log);
sctx->num_draw_calls++;
if (sctx->framebuffer.state.nr_cbufs > 1)
sctx->num_mrt_draw_calls++;
- if (info->primitive_restart)
+ if (primitive_restart)
sctx->num_prim_restart_calls++;
if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
sctx->num_spill_draw_calls++;
}
+
+return_cleanup:
if (index_size && indexbuf != info->index.resource)
pipe_resource_reference(&indexbuf, NULL);
}
-void si_draw_rectangle(struct blitter_context *blitter,
- void *vertex_elements_cso,
- blitter_get_vs_func get_vs,
- int x1, int y1, int x2, int y2,
- float depth, unsigned num_instances,
- enum blitter_attrib_type type,
- const union blitter_attrib *attrib)
+static void
+si_draw_rectangle(struct blitter_context *blitter,
+ void *vertex_elements_cso,
+ blitter_get_vs_func get_vs,
+ int x1, int y1, int x2, int y2,
+ float depth, unsigned num_instances,
+ enum blitter_attrib_type type,
+ const union blitter_attrib *attrib)
{
struct pipe_context *pipe = util_blitter_get_pipe(blitter);
struct si_context *sctx = (struct si_context*)pipe;
case UTIL_BLITTER_ATTRIB_NONE:;
}
- pipe->bind_vs_state(pipe, si_get_blit_vs(sctx, type, num_instances));
+ pipe->bind_vs_state(pipe, si_get_blitter_vs(sctx, type, num_instances));
struct pipe_draw_info info = {};
info.mode = SI_PRIM_RECTANGLE_LIST;
void si_trace_emit(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->gfx_cs;
- uint64_t va = sctx->current_saved_cs->trace_buf->gpu_address;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
uint32_t trace_id = ++sctx->current_saved_cs->trace_id;
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
- radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
- S_370_WR_CONFIRM(1) |
- S_370_ENGINE_SEL(V_370_ME));
- radeon_emit(cs, va);
- radeon_emit(cs, va >> 32);
- radeon_emit(cs, trace_id);
+ si_cp_write_data(sctx, sctx->current_saved_cs->trace_buf,
+ 0, 4, V_370_MEM, V_370_ME, &trace_id);
+
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, AC_ENCODE_TRACE_POINT(trace_id));
if (sctx->log)
u_log_flush(sctx->log);
}
+
+void si_init_draw_functions(struct si_context *sctx)
+{
+ sctx->b.draw_vbo = si_draw_vbo;
+
+ sctx->blitter->draw_rectangle = si_draw_rectangle;
+
+ si_init_ia_multi_vgt_param_table(sctx);
+}