const struct pipe_draw_info *info,
unsigned *num_patches)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- struct si_shader_selector *ls = sctx->vs_shader;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct si_shader_ctx_state *ls = &sctx->vs_shader;
/* The TES pointer will only be used for sctx->last_tcs.
* It would be wrong to think that TCS = TES. */
struct si_shader_selector *tcs =
- sctx->tcs_shader ? sctx->tcs_shader : sctx->tes_shader;
+ sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
unsigned num_tcs_input_cp = info->vertices_per_patch;
unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
/* This calculates how shader inputs and outputs among VS, TCS, and TES
* are laid out in LDS. */
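/* Rough sketch of that layout, derived from the offset math below:
 *   [input patch 0 .. N-1][output patch 0 .. N-1]
 * where each output patch holds its per-vertex outputs followed by its
 * per-patch outputs (16 bytes per patch output). */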
- num_tcs_inputs = util_last_bit64(ls->outputs_written);
+ num_tcs_inputs = util_last_bit64(ls->cso->outputs_written);
- if (sctx->tcs_shader) {
+ if (sctx->tcs_shader.cso) {
num_tcs_outputs = util_last_bit64(tcs->outputs_written);
num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
- output_patch0_offset = sctx->tcs_shader ? input_patch_size * *num_patches : 0;
+ output_patch0_offset = sctx->tcs_shader.cso ? input_patch_size * *num_patches : 0;
perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;
lds_size = output_patch0_offset + output_patch_size * *num_patches;
- ls_rsrc2 = ls->current->ls_rsrc2;
+ ls_rsrc2 = ls->current->config.rsrc2;
if (sctx->b.chip_class >= CIK) {
assert(lds_size <= 65536);
/* Due to a hw bug, RSRC2_LS must be written twice with another
* LS register written in between. */
if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
- si_write_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
- si_write_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
- radeon_emit(cs, ls->current->ls_rsrc1);
+ radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
+ radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
+ radeon_emit(cs, ls->current->config.rsrc1);
radeon_emit(cs, ls_rsrc2);
/* Compute userdata SGPRs. */
tcs_out_offsets = (output_patch0_offset / 16) |
((perpatch_output_offset / 16) << 16);
/* Set them for LS. */
- si_write_sh_reg(cs,
+ radeon_set_sh_reg(cs,
R_00B530_SPI_SHADER_USER_DATA_LS_0 + SI_SGPR_LS_OUT_LAYOUT * 4,
tcs_in_layout);
/* Set them for TCS. */
- si_write_sh_reg_seq(cs,
+ radeon_set_sh_reg_seq(cs,
R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OUT_OFFSETS * 4, 3);
radeon_emit(cs, tcs_out_offsets);
radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
radeon_emit(cs, tcs_in_layout);
/* Set them for TES. */
- si_write_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OUT_OFFSETS * 4, 2);
+ radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OUT_OFFSETS * 4, 2);
radeon_emit(cs, tcs_out_offsets);
radeon_emit(cs, tcs_out_layout | (num_tcs_output_cp << 26));
}
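+
+/* Count the primitives in a draw call. Unlike u_prims_for_vertices, this
+ * also handles patches (one patch per vertices_per_patch vertices) and the
+ * internal rectangle list (3 vertices per rectangle). */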
+static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
+{
+ switch (info->mode) {
+ case PIPE_PRIM_PATCHES:
+ return info->count / info->vertices_per_patch;
+ case R600_PRIM_RECTANGLE_LIST:
+ return info->count / 3;
+ default:
+ return u_prims_for_vertices(info->mode, info->count);
+ }
+}
+
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
const struct pipe_draw_info *info,
unsigned num_patches)
{
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
unsigned prim = info->mode;
unsigned primgroup_size = 128; /* recommended without a GS */
+ unsigned max_primgroup_in_wave = 2;
/* SWITCH_ON_EOP(0) is always preferable. */
bool wd_switch_on_eop = false;
bool partial_vs_wave = false;
bool partial_es_wave = false;
- if (sctx->gs_shader)
+ if (sctx->gs_shader.cso)
primgroup_size = 64; /* recommended with a GS */
- if (sctx->tes_shader) {
+ if (sctx->tes_shader.cso) {
unsigned num_cp_out =
- sctx->tcs_shader ?
- sctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
+ sctx->tcs_shader.cso ?
+ sctx->tcs_shader.cso->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
info->vertices_per_patch;
unsigned max_size = 256 / MAX2(info->vertices_per_patch, num_cp_out);
primgroup_size = MIN2(primgroup_size, max_size);
/* primgroup_size must be set to a multiple of NUM_PATCHES */
primgroup_size = (primgroup_size / num_patches) * num_patches;
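/* e.g. num_patches = 5 rounds a primgroup_size of 128 down to 125 */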
- /* SWITCH_ON_EOI must be set if PrimID is used.
- * If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
- if ((sctx->tcs_shader && sctx->tcs_shader->info.uses_primid) ||
- sctx->tes_shader->info.uses_primid) {
+ /* SWITCH_ON_EOI must be set if PrimID is used. */
+ if ((sctx->tcs_shader.cso && sctx->tcs_shader.cso->info.uses_primid) ||
+ sctx->tes_shader.cso->info.uses_primid)
ia_switch_on_eoi = true;
- partial_es_wave = true;
- }
/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
if ((sctx->b.family == CHIP_TAHITI ||
sctx->b.family == CHIP_PITCAIRN ||
sctx->b.family == CHIP_BONAIRE) &&
- sctx->gs_shader)
+ sctx->gs_shader.cso)
partial_vs_wave = true;
}
wd_switch_on_eop = true;
}
- if (sctx->b.streamout.streamout_enabled ||
- sctx->b.streamout.prims_gen_query_enabled)
- partial_vs_wave = true;
-
if (sctx->b.chip_class >= CIK) {
/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
* 4 shader engines. Set 1 to pass the assertion below.
prim == PIPE_PRIM_LINE_LOOP ||
prim == PIPE_PRIM_TRIANGLE_FAN ||
prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
- info->primitive_restart)
+ info->primitive_restart ||
+ info->count_from_stream_output)
wd_switch_on_eop = true;
/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
(info->indirect || info->instance_count > 1))
wd_switch_on_eop = true;
- /* USE_OPAQUE doesn't work when WD_SWITCH_ON_EOP is 0. */
- if (info->count_from_stream_output)
- wd_switch_on_eop = true;
+ /* Required on CIK and later. */
+ if (sctx->b.screen->info.max_se > 2 && !wd_switch_on_eop)
+ ia_switch_on_eoi = true;
+
+ /* Required by Hawaii and, for some special cases, by VI. */
+ if (ia_switch_on_eoi &&
+ (sctx->b.family == CHIP_HAWAII ||
+ (sctx->b.chip_class == VI &&
+ (sctx->gs_shader.cso || max_primgroup_in_wave != 2))))
+ partial_vs_wave = true;
+
+ /* Instancing bug on Bonaire. */
+ if (sctx->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
+ (info->indirect || info->instance_count > 1))
+ partial_vs_wave = true;
/* If the WD switch is false, the IA switch must be false too. */
assert(wd_switch_on_eop || !ia_switch_on_eop);
}
+ /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
+ if (ia_switch_on_eoi)
+ partial_es_wave = true;
+
+ /* GS requirement. */
+ if (SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
+ partial_es_wave = true;
+
/* Hw bug with single-primitive instances and SWITCH_ON_EOI
* on multi-SE chips. */
if (sctx->b.screen->info.max_se >= 2 && ia_switch_on_eoi &&
(info->indirect ||
(info->instance_count > 1 &&
- u_prims_for_vertices(info->mode, info->count) <= 1)))
+ si_num_prims_for_vertices(info) <= 1)))
sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
- /* Instancing bug on 2 SE chips. */
- if (sctx->b.screen->info.max_se == 2 && ia_switch_on_eoi &&
- (info->indirect || info->instance_count > 1))
- partial_vs_wave = true;
-
return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
- S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0);
+ S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
+ S_028AA8_MAX_PRIMGRP_IN_WAVE(sctx->b.chip_class >= VI ?
+ max_primgroup_in_wave : 0);
}
static unsigned si_get_ls_hs_config(struct si_context *sctx,
{
unsigned num_output_cp;
- if (!sctx->tes_shader)
+ if (!sctx->tes_shader.cso)
return 0;
- num_output_cp = sctx->tcs_shader ?
- sctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
+ num_output_cp = sctx->tcs_shader.cso ?
+ sctx->tcs_shader.cso->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] :
info->vertices_per_patch;
return S_028B58_NUM_PATCHES(num_patches) |
static void si_emit_scratch_reloc(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
if (!sctx->emit_scratch_reloc)
return;
- r600_write_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
+ radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
sctx->spi_tmpring_size);
if (sctx->scratch_buffer) {
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
sctx->scratch_buffer, RADEON_USAGE_READWRITE,
- RADEON_PRIO_SHADER_RESOURCE_RW);
+ RADEON_PRIO_SCRATCH_BUFFER);
}
sctx->emit_scratch_reloc = false;
/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned rast_prim = sctx->current_rast_prim;
struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;
rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
return;
- r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
+ radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
rs->pa_sc_line_stipple |
S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 :
rast_prim == PIPE_PRIM_LINE_STRIP ? 2 : 0));
static void si_emit_draw_registers(struct si_context *sctx,
const struct pipe_draw_info *info)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned prim = si_conv_pipe_prim(info->mode);
unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
unsigned ia_multi_vgt_param, ls_hs_config, num_patches = 0;
- if (sctx->tes_shader)
+ if (sctx->tes_shader.cso)
si_emit_derived_tess_state(sctx, info, &num_patches);
ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);
radeon_emit(cs, ia_multi_vgt_param); /* IA_MULTI_VGT_PARAM */
radeon_emit(cs, ls_hs_config); /* VGT_LS_HS_CONFIG */
} else {
- r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
- r600_write_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
- r600_write_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
+ radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
+ radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
+ radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
}
sctx->last_prim = prim;
sctx->last_multi_vgt_param = ia_multi_vgt_param;
}
if (gs_out_prim != sctx->last_gs_out_prim) {
- r600_write_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
+ radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
sctx->last_gs_out_prim = gs_out_prim;
}
/* Primitive restart. */
if (info->primitive_restart != sctx->last_primitive_restart_en) {
- r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
+ radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
sctx->last_primitive_restart_en = info->primitive_restart;
if (info->primitive_restart &&
(info->restart_index != sctx->last_restart_index ||
sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
- r600_write_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
+ radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
info->restart_index);
sctx->last_restart_index = info->restart_index;
}
const struct pipe_draw_info *info,
const struct pipe_index_buffer *ib)
{
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
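+ /* Used as the predicate bit in the draw PKT3 headers below. */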
+ bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
if (info->count_from_stream_output) {
struct r600_so_target *t =
uint64_t va = t->buf_filled_size->gpu_address +
t->buf_filled_size_offset;
- r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
+ radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
t->stride_in_dw);
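/* Copy the saved buf_filled_size into
 * VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE, so a USE_OPAQUE draw can
 * derive its vertex count on the GPU. */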
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
radeon_emit(cs, 0); /* unused */
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
t->buf_filled_size, RADEON_USAGE_READ,
- RADEON_PRIO_MIN);
+ RADEON_PRIO_SO_FILLED_SIZE);
}
/* draw packet */
if (info->indexed) {
radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
- if (ib->index_size == 4) {
- radeon_emit(cs, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
- V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
- } else {
- radeon_emit(cs, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
- V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
+ /* index type */
+ switch (ib->index_size) {
+ case 1:
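+ /* Native only on VI; for SI/CIK, si_draw_vbo converts 8-bit indices
+ * to 16-bit ones before we get here. */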
+ radeon_emit(cs, V_028A7C_VGT_INDEX_8);
+ break;
+ case 2:
+ radeon_emit(cs, V_028A7C_VGT_INDEX_16 |
+ (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
+ V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
+ break;
+ case 4:
+ radeon_emit(cs, V_028A7C_VGT_INDEX_32 |
+ (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
+ V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
+ break;
+ default:
+ assert(!"unreachable");
+ return;
}
}
sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
info->start_instance != sctx->last_start_instance ||
sh_base_reg != sctx->last_sh_base_reg) {
- si_write_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
+ radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
radeon_emit(cs, base_vertex);
radeon_emit(cs, info->start_instance);
} else {
si_invalidate_draw_sh_constants(sctx);
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource *)info->indirect,
- RADEON_USAGE_READ, RADEON_PRIO_MIN);
+ RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
}
if (info->indexed) {
ib->index_size;
uint64_t index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource *)ib->buffer,
- RADEON_USAGE_READ, RADEON_PRIO_MIN);
+ RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
if (info->indirect) {
uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;
radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
radeon_emit(cs, index_max_size);
- radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_INDIRECT, 3, sctx->b.predicate_drawing));
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_INDIRECT, 3, render_cond_bit));
radeon_emit(cs, info->indirect_offset);
radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
} else {
index_va += info->start * ib->index_size;
- radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, sctx->b.predicate_drawing));
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
radeon_emit(cs, index_max_size);
radeon_emit(cs, index_va);
radeon_emit(cs, (index_va >> 32UL) & 0xFF);
radeon_emit(cs, indirect_va);
radeon_emit(cs, indirect_va >> 32);
- radeon_emit(cs, PKT3(PKT3_DRAW_INDIRECT, 3, sctx->b.predicate_drawing));
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDIRECT, 3, render_cond_bit));
radeon_emit(cs, info->indirect_offset);
radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX);
} else {
- radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, sctx->b.predicate_drawing));
+ radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
radeon_emit(cs, info->count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
}
}
-#define BOTH_ICACHE_KCACHE (SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_KCACHE)
-
-void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom)
+void si_emit_cache_flush(struct si_context *si_ctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
+ struct r600_common_context *sctx = &si_ctx->b;
+ struct radeon_winsys_cs *cs = sctx->gfx.cs;
uint32_t cp_coher_cntl = 0;
uint32_t compute =
PKT3_SHADER_TYPE_S(!!(sctx->flags & SI_CONTEXT_FLAG_COMPUTE));
if (sctx->flags & SI_CONTEXT_INV_ICACHE)
cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
- if (sctx->flags & SI_CONTEXT_INV_KCACHE)
+ if (sctx->flags & SI_CONTEXT_INV_SMEM_L1)
cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
- if (sctx->flags & SI_CONTEXT_INV_TC_L1)
+ if (sctx->flags & SI_CONTEXT_INV_VMEM_L1)
cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
- if (sctx->flags & SI_CONTEXT_INV_TC_L2)
+ if (sctx->flags & SI_CONTEXT_INV_GLOBAL_L2) {
cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
+ /* TODO: this might not be needed. */
+ if (sctx->chip_class >= VI)
+ cp_coher_cntl |= S_0301F0_TC_WB_ACTION_ENA(1);
+ }
+
if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
S_0085F0_CB0_DEST_BASE_ENA(1) |
S_0085F0_CB5_DEST_BASE_ENA(1) |
S_0085F0_CB6_DEST_BASE_ENA(1) |
S_0085F0_CB7_DEST_BASE_ENA(1);
+
+ /* Necessary for DCC */
+ if (sctx->chip_class >= VI) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0) | compute);
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_DATA_TS) |
+ EVENT_INDEX(5));
+ radeon_emit(cs, 0);
+ radeon_emit(cs, 0);
+ radeon_emit(cs, 0);
+ radeon_emit(cs, 0);
+ }
}
if (sctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
sctx->flags = 0;
}
-const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 24 }; /* number of CS dwords */
-
static void si_get_draw_start_count(struct si_context *sctx,
const struct pipe_draw_info *info,
unsigned *start, unsigned *count)
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
struct pipe_index_buffer ib = {};
- unsigned i;
+ unsigned mask;
if (!info->count && !info->indirect &&
(info->indexed || !info->count_from_stream_output))
return;
- if (!sctx->ps_shader || !sctx->vs_shader)
+ if (!sctx->vs_shader.cso) {
+ assert(0);
+ return;
+ }
+ if (!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard)) {
+ assert(0);
+ return;
+ }
+ if (!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES)) {
+ assert(0);
return;
+ }
si_decompress_textures(sctx);
* This must be done after si_decompress_textures, which can call
* draw_vbo recursively, and before si_update_shaders, which uses
* current_rast_prim for this draw_vbo call. */
- if (sctx->gs_shader)
- sctx->current_rast_prim = sctx->gs_shader->gs_output_prim;
- else if (sctx->tes_shader)
+ if (sctx->gs_shader.cso)
+ sctx->current_rast_prim = sctx->gs_shader.cso->gs_output_prim;
+ else if (sctx->tes_shader.cso)
sctx->current_rast_prim =
- sctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
+ sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
else
sctx->current_rast_prim = info->mode;
- si_update_shaders(sctx);
-
- if (sctx->vertex_buffers_dirty) {
- si_update_vertex_buffers(sctx);
- sctx->vertex_buffers_dirty = false;
- }
+ if (!si_update_shaders(sctx) ||
+ !si_upload_shader_descriptors(sctx))
+ return;
if (info->indexed) {
/* Initialize the index buffer struct. */
ib.offset = sctx->index_buffer.offset;
/* Translate or upload, if needed. */
- if (ib.index_size == 1) {
+ /* 8-bit indices are supported on VI. */
+ if (sctx->b.chip_class <= CIK && ib.index_size == 1) {
struct pipe_resource *out_buffer = NULL;
unsigned out_offset, start, count, start_offset;
void *ptr;
si_get_draw_start_count(sctx, info, &start, &count);
start_offset = start * ib.index_size;
- u_upload_alloc(sctx->b.uploader, start_offset, count * 2,
+ u_upload_alloc(sctx->b.uploader, start_offset, count * 2, 256,
&out_offset, &out_buffer, &ptr);
+ if (!out_buffer) {
+ pipe_resource_reference(&ib.buffer, NULL);
+ return;
+ }
util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
ib.offset + start_offset,
start_offset = start * ib.index_size;
u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
- (char*)ib.user_buffer + start_offset,
+ 256, (char*)ib.user_buffer + start_offset,
&ib.offset, &ib.buffer);
+ if (!ib.buffer)
+ return;
/* info->start will be added by the drawing code */
ib.offset -= start_offset;
}
}
- if (info->indexed && r600_resource(ib.buffer)->TC_L2_dirty) {
- sctx->b.flags |= SI_CONTEXT_INV_TC_L2;
+ /* VI reads index buffers through TC L2. */
+ if (info->indexed && sctx->b.chip_class <= CIK &&
+ r600_resource(ib.buffer)->TC_L2_dirty) {
+ sctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2;
r600_resource(ib.buffer)->TC_L2_dirty = false;
}
/* Check flush flags. */
if (sctx->b.flags)
- sctx->atoms.s.cache_flush->dirty = true;
+ si_mark_atom_dirty(sctx, sctx->atoms.s.cache_flush);
- si_need_cs_space(sctx, 0, TRUE);
+ si_need_cs_space(sctx);
/* Emit states. */
- for (i = 0; i < SI_NUM_ATOMS(sctx); i++) {
- if (sctx->atoms.array[i]->dirty) {
- sctx->atoms.array[i]->emit(&sctx->b, sctx->atoms.array[i]);
- sctx->atoms.array[i]->dirty = false;
- }
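+ /* u_bit_scan consumes the lowest set bit on each iteration, so each
+ * dirty atom is emitted exactly once. */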
+ mask = sctx->dirty_atoms;
+ while (mask) {
+ struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];
+
+ atom->emit(&sctx->b, atom);
}
+ sctx->dirty_atoms = 0;
si_pm4_emit_dirty(sctx);
si_emit_scratch_reloc(sctx);
si_emit_draw_registers(sctx, info);
si_emit_draw_packets(sctx, info, &ib);
-#if SI_TRACE_CS
- if (sctx->screen->b.trace_bo) {
+ if (sctx->trace_buf)
si_trace_emit(sctx);
- }
-#endif
/* Workaround for a VGT hang when streamout is enabled.
* It must be done after drawing. */
- if (sctx->b.family == CHIP_HAWAII &&
+ if ((sctx->b.family == CHIP_HAWAII ||
+ sctx->b.family == CHIP_TONGA ||
+ sctx->b.family == CHIP_FIJI) &&
(sctx->b.streamout.streamout_enabled ||
sctx->b.streamout.prims_gen_query_enabled)) {
sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
struct r600_texture *rtex = (struct r600_texture *)surf->texture;
rtex->dirty_level_mask |= 1 << surf->u.tex.level;
+
+ if (rtex->surface.flags & RADEON_SURF_SBUFFER)
+ rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
}
if (sctx->framebuffer.compressed_cb_mask) {
struct pipe_surface *surf;
sctx->b.num_draw_calls++;
}
-#if SI_TRACE_CS
void si_trace_emit(struct si_context *sctx)
{
- struct si_screen *sscreen = sctx->screen;
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
- uint64_t va;
-
- va = sscreen->b.trace_bo->gpu_address;
- r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, sscreen->b.trace_bo,
- RADEON_USAGE_READWRITE, RADEON_PRIO_MIN);
- radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
- radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_MEM_SYNC) |
- PKT3_WRITE_DATA_WR_CONFIRM |
- PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
- radeon_emit(cs, va & 0xFFFFFFFFUL);
- radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
- radeon_emit(cs, cs->cdw);
- radeon_emit(cs, sscreen->b.cs_count);
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+
+ sctx->trace_id++;
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, sctx->trace_buf,
+ RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
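+ /* WRITE_DATA stores the new trace ID to memory once the GPU reaches
+ * this point; the NOP below embeds the same ID in the IB so the two
+ * can be correlated when debugging a hang. */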
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_ME));
+ radeon_emit(cs, sctx->trace_buf->gpu_address);
+ radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
+ radeon_emit(cs, sctx->trace_id);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, SI_ENCODE_TRACE_POINT(sctx->trace_id));
}
-#endif