#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
+#include "util/u_prim.h"
static void si_decompress_textures(struct si_context *sctx)
{
[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
+ [PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
[R600_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
};
assert(mode < Elements(prim_conv));
[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ [PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
[R600_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
};
assert(mode < Elements(prim_conv));
return prim_conv[mode];
}
+/**
+ * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
+ * LS.LDS_SIZE is shared by all 3 shader stages.
+ *
+ * The information about LDS and other non-compile-time parameters is then
+ * written to userdata SGPRs.
+ */
+static void si_emit_derived_tess_state(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ unsigned *num_patches)
+{
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct si_shader_selector *ls = sctx->vs_shader;
+ /* The TES pointer will only be used for sctx->last_tcs.
+ * It would be wrong to think that TCS = TES. */
+ struct si_shader_selector *tcs =
+ sctx->tcs_shader ? sctx->tcs_shader : sctx->tes_shader;
+ unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
+ unsigned num_tcs_input_cp = info->vertices_per_patch;
+ unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
+ unsigned num_tcs_patch_outputs;
+ unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
+ unsigned input_patch_size, output_patch_size, output_patch0_offset;
+ unsigned perpatch_output_offset, lds_size, ls_rsrc2;
+ unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
+
+ *num_patches = 1; /* TODO: calculate this */
+
+ if (sctx->last_ls == ls->current &&
+ sctx->last_tcs == tcs &&
+ sctx->last_tes_sh_base == tes_sh_base &&
+ sctx->last_num_tcs_input_cp == num_tcs_input_cp)
+ return;
+
+ sctx->last_ls = ls->current;
+ sctx->last_tcs = tcs;
+ sctx->last_tes_sh_base = tes_sh_base;
+ sctx->last_num_tcs_input_cp = num_tcs_input_cp;
+
+ /* This calculates how shader inputs and outputs among VS, TCS, and TES
+ * are laid out in LDS. */
+ num_tcs_inputs = util_last_bit64(ls->outputs_written);
+
+ if (sctx->tcs_shader) {
+ num_tcs_outputs = util_last_bit64(tcs->outputs_written);
+ num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
+ num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
+ } else {
+ /* No TCS. Route varyings from LS to TES. */
+ num_tcs_outputs = num_tcs_inputs;
+ num_tcs_output_cp = num_tcs_input_cp;
+ num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
+ }
+
+ input_vertex_size = num_tcs_inputs * 16;
+ output_vertex_size = num_tcs_outputs * 16;
+
+ input_patch_size = num_tcs_input_cp * input_vertex_size;
+
+ pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
+ output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
+
+ output_patch0_offset = sctx->tcs_shader ? input_patch_size * *num_patches : 0;
+ perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;
+
+ lds_size = output_patch0_offset + output_patch_size * *num_patches;
+ ls_rsrc2 = ls->current->ls_rsrc2;
+
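+ /* Worked example with assumed counts: 4 vec4 inputs and outputs and
+ * 3 control points in and out give input_vertex_size =
+ * output_vertex_size = 4*16 = 64 bytes, input_patch_size = 3*64 = 192,
+ * output_patch_size = 3*64 + 2*16 = 224, so with a TCS bound and
+ * num_patches = 1, lds_size = 192 + 224 = 416 bytes. The allocation
+ * below is in 512-byte units on CIK and 256-byte units on SI, so this
+ * example rounds up to one CIK unit. */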
+ if (sctx->b.chip_class >= CIK) {
+ assert(lds_size <= 65536);
+ ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 512) / 512);
+ } else {
+ assert(lds_size <= 32768);
+ ls_rsrc2 |= S_00B52C_LDS_SIZE(align(lds_size, 256) / 256);
+ }
+
+ /* Due to a hw bug, RSRC2_LS must be written twice with another
+ * LS register written in between. */
+ if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
+ si_write_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
+ si_write_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
+ radeon_emit(cs, ls->current->ls_rsrc1);
+ radeon_emit(cs, ls_rsrc2);
+
+ /* Compute userdata SGPRs. */
+ assert(((input_vertex_size / 4) & ~0xff) == 0);
+ assert(((output_vertex_size / 4) & ~0xff) == 0);
+ assert(((input_patch_size / 4) & ~0x1fff) == 0);
+ assert(((output_patch_size / 4) & ~0x1fff) == 0);
+ assert(((output_patch0_offset / 16) & ~0xffff) == 0);
+ assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
+ assert(num_tcs_input_cp <= 32);
+ assert(num_tcs_output_cp <= 32);
+
+ tcs_in_layout = (input_patch_size / 4) |
+ ((input_vertex_size / 4) << 13);
+ tcs_out_layout = (output_patch_size / 4) |
+ ((output_vertex_size / 4) << 13);
+ tcs_out_offsets = (output_patch0_offset / 16) |
+ ((perpatch_output_offset / 16) << 16);
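+ /* Resulting SGPR layouts, implied by the shifts and asserts above:
+ * tcs_in_layout: [12:0] input_patch_size/4, [20:13] input_vertex_size/4
+ * tcs_out_layout: [12:0] output_patch_size/4, [20:13] output_vertex_size/4,
+ * [31:26] the control point count OR'ed in at emit time below
+ * tcs_out_offsets: [15:0] output_patch0_offset/16,
+ * [31:16] perpatch_output_offset/16 */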
+
+ /* Set them for LS. */
+ si_write_sh_reg(cs,
+ R_00B530_SPI_SHADER_USER_DATA_LS_0 + SI_SGPR_LS_OUT_LAYOUT * 4,
+ tcs_in_layout);
+
+ /* Set them for TCS. */
+ si_write_sh_reg_seq(cs,
+ R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OUT_OFFSETS * 4, 3);
+ radeon_emit(cs, tcs_out_offsets);
+ radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
+ radeon_emit(cs, tcs_in_layout);
+
+ /* Set them for TES. */
+ si_write_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OUT_OFFSETS * 4, 2);
+ radeon_emit(cs, tcs_out_offsets);
+ radeon_emit(cs, tcs_out_layout | (num_tcs_output_cp << 26));
+}
+
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
- const struct pipe_draw_info *info)
+ const struct pipe_draw_info *info,
+ unsigned num_patches)
{
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
unsigned prim = info->mode;
/* SWITCH_ON_EOP(0) is always preferable. */
bool wd_switch_on_eop = false;
bool ia_switch_on_eop = false;
+ bool ia_switch_on_eoi = false;
bool partial_vs_wave = false;
+ bool partial_es_wave = false;
if (sctx->gs_shader)
primgroup_size = 64; /* recommended with a GS */
+ if (sctx->tes_shader) {
+ unsigned num_cp_out =
+ sctx->tcs_shader ?
+ sctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
+ info->vertices_per_patch;
+ unsigned max_size = 256 / MAX2(info->vertices_per_patch, num_cp_out);
+
+ primgroup_size = MIN2(primgroup_size, max_size);
+
+ /* primgroup_size must be set to a multiple of NUM_PATCHES */
+ primgroup_size = (primgroup_size / num_patches) * num_patches;
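+ /* E.g. primgroup_size = 30 with num_patches = 4 would round down
+ * to 28; with the current num_patches = 1 this is a no-op. */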
+
+ /* SWITCH_ON_EOI must be set if PrimID is used.
+ * If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
+ if ((sctx->tcs_shader && sctx->tcs_shader->info.uses_primid) ||
+ sctx->tes_shader->info.uses_primid) {
+ ia_switch_on_eoi = true;
+ partial_es_wave = true;
+ }
+
+ /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
+ if ((sctx->b.family == CHIP_TAHITI ||
+ sctx->b.family == CHIP_PITCAIRN ||
+ sctx->b.family == CHIP_BONAIRE) &&
+ sctx->gs_shader)
+ partial_vs_wave = true;
+ }
+
/* This is a hardware requirement. */
if ((rs && rs->line_stipple_enable) ||
(sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
(info->indirect || info->instance_count > 1))
wd_switch_on_eop = true;
+ /* USE_OPAQUE doesn't work when WD_SWITCH_ON_EOP is 0. */
+ if (info->count_from_stream_output)
+ wd_switch_on_eop = true;
+
/* If the WD switch is false, the IA switch must be false too. */
assert(wd_switch_on_eop || !ia_switch_on_eop);
}
+ /* Hw bug with single-primitive instances and SWITCH_ON_EOI
+ * on multi-SE chips. */
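+ /* E.g. many instances of a single triangle (count == 3) hit this;
+ * u_prims_for_vertices() counts the primitives per instance. */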
+ if (sctx->b.screen->info.max_se >= 2 && ia_switch_on_eoi &&
+ (info->indirect ||
+ (info->instance_count > 1 &&
+ u_prims_for_vertices(info->mode, info->count) <= 1)))
+ sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
+
+ /* Instancing bug on 2 SE chips. */
+ if (sctx->b.screen->info.max_se == 2 && ia_switch_on_eoi &&
+ (info->indirect || info->instance_count > 1))
+ partial_vs_wave = true;
+
return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
+ S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
+ S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
- S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0);
+ S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
+ S_028AA8_MAX_PRIMGRP_IN_WAVE(sctx->b.chip_class >= VI ? 2 : 0);
+}
+
+static unsigned si_get_ls_hs_config(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ unsigned num_patches)
+{
+ unsigned num_output_cp;
+
+ if (!sctx->tes_shader)
+ return 0;
+
+ num_output_cp = sctx->tcs_shader ?
+ sctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
+ info->vertices_per_patch;
+
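+ /* NUM_PATCHES is presumably the number of patches per HS threadgroup;
+ * it is currently always 1 (see the TODO in si_emit_derived_tess_state). */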
+ return S_028B58_NUM_PATCHES(num_patches) |
+ S_028B58_HS_NUM_INPUT_CP(info->vertices_per_patch) |
+ S_028B58_HS_NUM_OUTPUT_CP(num_output_cp);
}
-static void si_emit_rasterizer_prim_state(struct si_context *sctx, unsigned mode)
+static void si_emit_scratch_reloc(struct si_context *sctx)
{
struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- if (sctx->gs_shader)
- mode = sctx->gs_shader->gs_output_prim;
+ if (!sctx->emit_scratch_reloc)
+ return;
- if (mode == sctx->last_rast_prim)
+ r600_write_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
+ sctx->spi_tmpring_size);
+
+ if (sctx->scratch_buffer) {
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ sctx->scratch_buffer, RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SHADER_RESOURCE_RW);
+ }
+ sctx->emit_scratch_reloc = false;
+}
+
+/* rast_prim is the primitive type after GS and tessellation. */
+static void si_emit_rasterizer_prim_state(struct si_context *sctx)
+{
+ struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ unsigned rast_prim = sctx->current_rast_prim;
+ struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;
+
+ /* Skip this if not rendering lines. */
+ if (rast_prim != PIPE_PRIM_LINES &&
+ rast_prim != PIPE_PRIM_LINE_LOOP &&
+ rast_prim != PIPE_PRIM_LINE_STRIP &&
+ rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
+ rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
return;
- r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
- sctx->pa_sc_line_stipple |
- S_028A0C_AUTO_RESET_CNTL(mode == PIPE_PRIM_LINES ? 1 :
- mode == PIPE_PRIM_LINE_STRIP ? 2 : 0));
+ if (rast_prim == sctx->last_rast_prim &&
+ rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
+ return;
- r600_write_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL,
- sctx->pa_su_sc_mode_cntl |
- S_028814_PROVOKING_VTX_LAST(mode == PIPE_PRIM_QUADS ||
- mode == PIPE_PRIM_QUAD_STRIP ||
- mode == PIPE_PRIM_POLYGON));
+ r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
+ rs->pa_sc_line_stipple |
+ S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 :
+ rast_prim == PIPE_PRIM_LINE_STRIP ? 2 : 0));
- sctx->last_rast_prim = mode;
+ sctx->last_rast_prim = rast_prim;
+ sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}
static void si_emit_draw_registers(struct si_context *sctx,
- const struct pipe_draw_info *info,
- const struct pipe_index_buffer *ib)
+ const struct pipe_draw_info *info)
{
struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
unsigned prim = si_conv_pipe_prim(info->mode);
- unsigned gs_out_prim =
- si_conv_prim_to_gs_out(sctx->gs_shader ?
- sctx->gs_shader->gs_output_prim :
- info->mode);
- unsigned ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info);
+ unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
+ unsigned ia_multi_vgt_param, ls_hs_config, num_patches = 0;
+
+ if (sctx->tes_shader)
+ si_emit_derived_tess_state(sctx, info, &num_patches);
+
+ ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);
+ ls_hs_config = si_get_ls_hs_config(sctx, info, num_patches);
/* Draw state. */
if (prim != sctx->last_prim ||
- ia_multi_vgt_param != sctx->last_multi_vgt_param) {
+ ia_multi_vgt_param != sctx->last_multi_vgt_param ||
+ ls_hs_config != sctx->last_ls_hs_config) {
if (sctx->b.chip_class >= CIK) {
radeon_emit(cs, PKT3(PKT3_DRAW_PREAMBLE, 2, 0));
radeon_emit(cs, prim); /* VGT_PRIMITIVE_TYPE */
radeon_emit(cs, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
- radeon_emit(cs, 0); /* VGT_LS_HS_CONFIG */
+ radeon_emit(cs, ls_hs_config); /* VGT_LS_HS_CONFIG */
} else {
r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
r600_write_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
+ r600_write_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
}
sctx->last_prim = prim;
sctx->last_multi_vgt_param = ia_multi_vgt_param;
+ sctx->last_ls_hs_config = ls_hs_config;
}
if (gs_out_prim != sctx->last_gs_out_prim) {
const struct pipe_index_buffer *ib)
{
struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- unsigned sh_base_reg = (sctx->gs_shader ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
- R_00B130_SPI_SHADER_USER_DATA_VS_0);
+ unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
if (info->count_from_stream_output) {
struct r600_so_target *t =
if (info->indexed) {
radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
- if (ib->index_size == 4) {
- radeon_emit(cs, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
- V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
- } else {
- radeon_emit(cs, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
- V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
+ /* index type */
+ switch (ib->index_size) {
+ case 1:
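+ /* Presumably only reachable on VI, since older chips translate
+ * 8-bit indices before drawing (see the translation in draw_vbo). */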
+ radeon_emit(cs, V_028A7C_VGT_INDEX_8);
+ break;
+ case 2:
+ radeon_emit(cs, V_028A7C_VGT_INDEX_16 |
+ (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
+ V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
+ break;
+ case 4:
+ radeon_emit(cs, V_028A7C_VGT_INDEX_32 |
+ (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
+ V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
+ break;
+ default:
+ assert(!"unreachable");
+ return;
}
}
}
}
+#define BOTH_ICACHE_KCACHE (SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_KCACHE)
+
void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
uint32_t cp_coher_cntl = 0;
uint32_t compute =
- PKT3_SHADER_TYPE_S(!!(sctx->flags & R600_CONTEXT_FLAG_COMPUTE));
-
- /* XXX SI flushes both ICACHE and KCACHE if either flag is set.
- * XXX CIK shouldn't have this issue. Test CIK before separating the flags
- * XXX to ensure there is no regression. Also find out if there is another
- * XXX way to flush either ICACHE or KCACHE but not both for SI. */
- if (sctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
- R600_CONTEXT_INV_CONST_CACHE)) {
- cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
- S_0085F0_SH_KCACHE_ACTION_ENA(1);
- }
- if (sctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
- R600_CONTEXT_STREAMOUT_FLUSH)) {
- cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
- S_0085F0_TCL1_ACTION_ENA(1);
- }
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
+ PKT3_SHADER_TYPE_S(!!(sctx->flags & SI_CONTEXT_FLAG_COMPUTE));
+
+ /* SI has a bug where it always flushes both ICACHE and KCACHE if either
+ * bit is set. An alternative is to write SQC_CACHES, but that
+ * doesn't seem to work reliably. Since the bug doesn't affect
+ * correctness (it only does more work than necessary) and
+ * the performance impact is likely negligible, there is no plan
+ * to fix it.
+ */
+
+ if (sctx->flags & SI_CONTEXT_INV_ICACHE)
+ cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
+ if (sctx->flags & SI_CONTEXT_INV_KCACHE)
+ cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
+
+ if (sctx->flags & SI_CONTEXT_INV_TC_L1)
+ cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
+ if (sctx->flags & SI_CONTEXT_INV_TC_L2) {
+ cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
+
+ /* TODO: this might not be needed. */
+ if (sctx->chip_class >= VI)
+ cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
+ }
+
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
S_0085F0_CB0_DEST_BASE_ENA(1) |
S_0085F0_CB1_DEST_BASE_ENA(1) |
S_0085F0_CB6_DEST_BASE_ENA(1) |
S_0085F0_CB7_DEST_BASE_ENA(1);
}
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
S_0085F0_DB_DEST_BASE_ENA(1);
}
- if (cp_coher_cntl) {
- if (sctx->chip_class >= CIK) {
- radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
- radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
- radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
- radeon_emit(cs, 0xff); /* CP_COHER_SIZE_HI */
- radeon_emit(cs, 0); /* CP_COHER_BASE */
- radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
- radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
- } else {
- radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
- radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
- radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
- radeon_emit(cs, 0); /* CP_COHER_BASE */
- radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
- }
- }
-
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
}
- if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
+ if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
}
- if (sctx->flags & R600_CONTEXT_FLUSH_WITH_INV_L2) {
+ if (sctx->flags & SI_CONTEXT_FLUSH_WITH_INV_L2) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) | EVENT_INDEX(7) |
EVENT_WRITE_INV_L2);
}
- if (sctx->flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
+ /* FLUSH_AND_INV events must be emitted before PS_PARTIAL_FLUSH.
+ * Otherwise, clearing CMASK (CB meta) with CP DMA isn't reliable.
+ *
+ * I think the reason is that FLUSH_AND_INV is only added to a queue
+ * and it is PS_PARTIAL_FLUSH that waits for it to complete.
+ */
+ if (sctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- } else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
- /* Needed if streamout buffers are going to be used as a source. */
+ } else if (sctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
-
- if (sctx->flags & R600_CONTEXT_CS_PARTIAL_FLUSH) {
+ if (sctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
-
- if (sctx->flags & R600_CONTEXT_VGT_FLUSH) {
+ if (sctx->flags & SI_CONTEXT_VGT_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
}
- if (sctx->flags & R600_CONTEXT_VGT_STREAMOUT_SYNC) {
+ if (sctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0) | compute);
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
}
+ /* SURFACE_SYNC must be emitted after partial flushes.
+ * It looks like SURFACE_SYNC flushes caches immediately and doesn't
+ * wait for any engines. This should be last.
+ */
+ if (cp_coher_cntl) {
+ if (sctx->chip_class >= CIK) {
+ radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0) | compute);
+ radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
+ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
+ radeon_emit(cs, 0xff); /* CP_COHER_SIZE_HI */
+ radeon_emit(cs, 0); /* CP_COHER_BASE */
+ radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
+ radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
+ } else {
+ radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0) | compute);
+ radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
+ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
+ radeon_emit(cs, 0); /* CP_COHER_BASE */
+ radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
+ }
+ }
+
sctx->flags = 0;
}
-const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 21 }; /* number of CS dwords */
+const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 24 }; /* number of CS dwords */
static void si_get_draw_start_count(struct si_context *sctx,
const struct pipe_draw_info *info,
{
struct si_context *sctx = (struct si_context *)ctx;
struct pipe_index_buffer ib = {};
- uint32_t i;
+ unsigned i;
if (!info->count && !info->indirect &&
(info->indexed || !info->count_from_stream_output))
return;
- if (!sctx->ps_shader || !sctx->vs_shader)
+ if (!sctx->ps_shader || !sctx->vs_shader) {
+ assert(0);
+ return;
+ }
+ if (!!sctx->tes_shader != (info->mode == PIPE_PRIM_PATCHES)) {
+ assert(0);
return;
+ }
si_decompress_textures(sctx);
- si_update_shaders(sctx);
- if (sctx->vertex_buffers_dirty) {
- si_update_vertex_buffers(sctx);
- sctx->vertex_buffers_dirty = false;
- }
+ /* Set the rasterization primitive type.
+ *
+ * This must be done after si_decompress_textures, which can call
+ * draw_vbo recursively, and before si_update_shaders, which uses
+ * current_rast_prim for this draw_vbo call. */
+ if (sctx->gs_shader)
+ sctx->current_rast_prim = sctx->gs_shader->gs_output_prim;
+ else if (sctx->tes_shader)
+ sctx->current_rast_prim =
+ sctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
+ else
+ sctx->current_rast_prim = info->mode;
+
+ si_update_shaders(sctx);
+ if (!si_upload_shader_descriptors(sctx))
+ return;
if (info->indexed) {
/* Initialize the index buffer struct. */
ib.offset = sctx->index_buffer.offset;
/* Translate or upload, if needed. */
- if (ib.index_size == 1) {
+ /* 8-bit indices are natively supported on VI; older chips need them translated. */
+ if (sctx->b.chip_class <= CIK && ib.index_size == 1) {
struct pipe_resource *out_buffer = NULL;
unsigned out_offset, start, count, start_offset;
void *ptr;
}
}
+ /* TODO: VI should read index buffers through TC L2, so this flush
+ * shouldn't be needed there. */
+ if (info->indexed && r600_resource(ib.buffer)->TC_L2_dirty) {
+ sctx->b.flags |= SI_CONTEXT_INV_TC_L2;
+ r600_resource(ib.buffer)->TC_L2_dirty = false;
+ }
+
/* Check flush flags. */
if (sctx->b.flags)
- sctx->atoms.s.cache_flush->dirty = true;
+ si_mark_atom_dirty(sctx, sctx->atoms.s.cache_flush);
si_need_cs_space(sctx, 0, TRUE);
}
si_pm4_emit_dirty(sctx);
- si_emit_rasterizer_prim_state(sctx, info->mode);
- si_emit_draw_registers(sctx, info, &ib);
+ si_emit_scratch_reloc(sctx);
+ si_emit_rasterizer_prim_state(sctx);
+ si_emit_draw_registers(sctx, info);
si_emit_draw_packets(sctx, info, &ib);
-#if SI_TRACE_CS
- if (sctx->screen->b.trace_bo) {
+ if (sctx->trace_buf)
si_trace_emit(sctx);
- }
-#endif
/* Workaround for a VGT hang when streamout is enabled.
* It must be done after drawing. */
- if (sctx->b.family == CHIP_HAWAII &&
+ if ((sctx->b.family == CHIP_HAWAII || sctx->b.family == CHIP_TONGA) &&
(sctx->b.streamout.streamout_enabled ||
sctx->b.streamout.prims_gen_query_enabled)) {
- sctx->b.flags |= R600_CONTEXT_VGT_STREAMOUT_SYNC;
+ sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
}
/* Set the depth buffer as dirty. */
sctx->b.num_draw_calls++;
}
-#if SI_TRACE_CS
void si_trace_emit(struct si_context *sctx)
{
- struct si_screen *sscreen = sctx->screen;
struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- uint64_t va;
- va = sscreen->b.trace_bo->gpu_address;
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, sscreen->b.trace_bo,
+ sctx->trace_id++;
+ r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, sctx->trace_buf,
RADEON_USAGE_READWRITE, RADEON_PRIO_MIN);
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
- radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) |
- PKT3_WRITE_DATA_WR_CONFIRM |
- PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
- radeon_emit(cs, va & 0xFFFFFFFFUL);
- radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
- radeon_emit(cs, cs->cdw);
- radeon_emit(cs, sscreen->b.cs_count);
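+ /* Write the new trace_id to memory and also encode it into the CS as
+ * a NOP, so a post-hang dump can presumably compare the last value
+ * that reached memory against the trace points in the command stream. */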
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_ME));
+ radeon_emit(cs, sctx->trace_buf->gpu_address);
+ radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
+ radeon_emit(cs, sctx->trace_id);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, SI_ENCODE_TRACE_POINT(sctx->trace_id));
}
-#endif