const struct pipe_draw_info *info,
unsigned *num_patches)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
struct si_shader *ls_current;
struct si_shader_selector *ls;
/* The TES pointer will only be used for sctx->last_tcs.
assert(num_tcs_input_cp <= 32);
assert(num_tcs_output_cp <= 32);
+ uint64_t ring_va = r600_resource(sctx->tess_rings)->gpu_address;
+ assert((ring_va & u_bit_consecutive(0, 19)) == 0);
+
tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size / 4);
tcs_out_layout = (output_patch_size / 4) |
- (num_tcs_input_cp << 13);
+ (num_tcs_input_cp << 13) |
+ ring_va;
tcs_out_offsets = (output_patch0_offset / 16) |
((perpatch_output_offset / 16) << 16);
offchip_layout = *num_patches |
/* Set userdata SGPRs for TES. */
radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
radeon_emit(cs, offchip_layout);
- radeon_emit(cs, r600_resource(sctx->tess_offchip_ring)->gpu_address >> 16);
+ radeon_emit(cs, ring_va);
ls_hs_config = S_028B58_NUM_PATCHES(*num_patches) |
S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
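The OR into tcs_out_layout above is lossless because the two layout fields need at most 19 bits: output_patch_size / 4 sits below bit 13 (that is what the << 13 shift assumes) and num_tcs_input_cp, capped at 32 by the earlier assert, fits in bits 13-18. The assert on ring_va then guarantees the ring base contributes nothing below bit 19. A minimal decoding sketch under exactly those field-width assumptions; decode_tcs_out_layout is illustrative and not part of the patch:

static void decode_tcs_out_layout(uint32_t tcs_out_layout)
{
   /* Bits 0-12: output patch size in dwords. */
   unsigned output_patch_size = (tcs_out_layout & 0x1fff) * 4;
   /* Bits 13-18: number of TCS input control points (<= 32). */
   unsigned num_tcs_input_cp = (tcs_out_layout >> 13) & 0x3f;
   /* Bits 19 and up: low bits of the 512-KiB-aligned tess ring address. */
   uint32_t ring_base_lo = tcs_out_layout & ~u_bit_consecutive(0, 19);
   (void)output_patch_size; (void)num_tcs_input_cp; (void)ring_base_lo;
}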
/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
enum pipe_prim_type rast_prim = sctx->current_rast_prim;
struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;
}
if (sctx->current_vs_state != sctx->last_vs_state) {
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
radeon_set_sh_reg(cs,
sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
const struct pipe_draw_info *info,
unsigned num_patches)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned prim = si_conv_pipe_prim(info->mode);
unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
unsigned ia_multi_vgt_param;
unsigned index_offset)
{
struct pipe_draw_indirect_info *indirect = info->indirect;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
uint32_t index_max_size = 0;
radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
radeon_emit(cs, 0); /* unused */
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
t->buf_filled_size, RADEON_USAGE_READ,
RADEON_PRIO_SO_FILLED_SIZE);
}
index_size;
index_va = r600_resource(indexbuf)->gpu_address + index_offset;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
(struct r600_resource *)indexbuf,
RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
} else {
radeon_emit(cs, indirect_va);
radeon_emit(cs, indirect_va >> 32);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs,
(struct r600_resource *)indirect->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
(struct r600_resource *)indirect->indirect_draw_count;
radeon_add_to_buffer_list(
- &sctx->b, &sctx->b.gfx, params_buf,
+ sctx, sctx->b.gfx_cs, params_buf,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
}
}
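Every call site in the draw path now passes the si_context and the target command stream explicitly, rather than the old r600_common_context plus ring pair. A sketch of the prototype implied by the calls above; the parameter names are assumptions, not taken from the patch:

void radeon_add_to_buffer_list(struct si_context *sctx,
                               struct radeon_winsys_cs *cs,
                               struct r600_resource *bo,
                               enum radeon_bo_usage usage,
                               enum radeon_bo_priority priority);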
-static void si_emit_surface_sync(struct r600_common_context *rctx,
+static void si_emit_surface_sync(struct si_context *sctx,
unsigned cp_coher_cntl)
{
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
- if (rctx->chip_class >= GFX9) {
+ if (sctx->b.chip_class >= GFX9) {
/* Flush caches and wait for the caches to assert idle. */
radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
void si_emit_cache_flush(struct si_context *sctx)
{
- struct r600_common_context *rctx = &sctx->b;
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
+ uint32_t flags = sctx->b.flags;
uint32_t cp_coher_cntl = 0;
- uint32_t flush_cb_db = rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
- SI_CONTEXT_FLUSH_AND_INV_DB);
+ uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
+ SI_CONTEXT_FLUSH_AND_INV_DB);
- if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB)
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
sctx->b.num_cb_cache_flushes++;
- if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB)
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
sctx->b.num_db_cache_flushes++;
/* SI has a bug that it always flushes ICACHE and KCACHE if either
* to add a workaround for it.
*/
- if (rctx->flags & SI_CONTEXT_INV_ICACHE)
+ if (flags & SI_CONTEXT_INV_ICACHE)
cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
- if (rctx->flags & SI_CONTEXT_INV_SMEM_L1)
+ if (flags & SI_CONTEXT_INV_SMEM_L1)
cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
- if (rctx->chip_class <= VI) {
- if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
+ if (sctx->b.chip_class <= VI) {
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
S_0085F0_CB0_DEST_BASE_ENA(1) |
S_0085F0_CB1_DEST_BASE_ENA(1) |
S_0085F0_CB7_DEST_BASE_ENA(1);
/* Necessary for DCC */
- if (rctx->chip_class == VI)
- si_gfx_write_event_eop(rctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
- 0, EOP_DATA_SEL_DISCARD, NULL,
- 0, 0, SI_NOT_QUERY);
+ if (sctx->b.chip_class == VI)
+ si_gfx_write_event_eop(sctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
+ 0, EOP_DATA_SEL_DISCARD, NULL,
+ 0, 0, SI_NOT_QUERY);
}
- if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB)
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
S_0085F0_DB_DEST_BASE_ENA(1);
}
- if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
+ if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
/* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
}
- if (rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_DB |
- SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
+ if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB |
+ SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
/* Flush HTILE. SURFACE_SYNC will wait for idle. */
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
* for everything including CB/DB cache flushes.
*/
if (!flush_cb_db) {
- if (rctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
+ if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
/* Only count explicit shader flushes, not implicit ones
* done by SURFACE_SYNC.
*/
- rctx->num_vs_flushes++;
- rctx->num_ps_flushes++;
- } else if (rctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
+ sctx->b.num_vs_flushes++;
+ sctx->b.num_ps_flushes++;
+ } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- rctx->num_vs_flushes++;
+ sctx->b.num_vs_flushes++;
}
}
- if (rctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
+ if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
sctx->compute_is_busy) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH | EVENT_INDEX(4)));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- rctx->num_cs_flushes++;
+ sctx->b.num_cs_flushes++;
sctx->compute_is_busy = false;
}
/* VGT state synchronization. */
- if (rctx->flags & SI_CONTEXT_VGT_FLUSH) {
+ if (flags & SI_CONTEXT_VGT_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
}
- if (rctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
+ if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
}
*/
tc_flags = 0;
- if (rctx->flags & SI_CONTEXT_INV_L2_METADATA) {
+ if (flags & SI_CONTEXT_INV_L2_METADATA) {
tc_flags = EVENT_TC_ACTION_ENA |
EVENT_TC_MD_ACTION_ENA;
}
/* Ideally flush TC together with CB/DB. */
- if (rctx->flags & SI_CONTEXT_INV_GLOBAL_L2) {
+ if (flags & SI_CONTEXT_INV_GLOBAL_L2) {
/* Writeback and invalidate everything in L2 & L1. */
tc_flags = EVENT_TC_ACTION_ENA |
EVENT_TC_WB_ACTION_ENA;
/* Clear the flags. */
- rctx->flags &= ~(SI_CONTEXT_INV_GLOBAL_L2 |
- SI_CONTEXT_WRITEBACK_GLOBAL_L2 |
- SI_CONTEXT_INV_VMEM_L1);
+ flags &= ~(SI_CONTEXT_INV_GLOBAL_L2 |
+ SI_CONTEXT_WRITEBACK_GLOBAL_L2 |
+ SI_CONTEXT_INV_VMEM_L1);
sctx->b.num_L2_invalidates++;
}
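/* What follows makes the flush observable: the EOP event writes the bumped
 * wait_mem_number into the scratch buffer once the CB/DB work has drained,
 * and the fence wait then stalls the CP until that value appears. */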
va = sctx->wait_mem_scratch->gpu_address;
sctx->wait_mem_number++;
- si_gfx_write_event_eop(rctx, cb_db_event, tc_flags,
- EOP_DATA_SEL_VALUE_32BIT,
- sctx->wait_mem_scratch, va,
- sctx->wait_mem_number, SI_NOT_QUERY);
- si_gfx_wait_fence(rctx, va, sctx->wait_mem_number, 0xffffffff);
+ si_gfx_write_event_eop(sctx, cb_db_event, tc_flags,
+ EOP_DATA_SEL_VALUE_32BIT,
+ sctx->wait_mem_scratch, va,
+ sctx->wait_mem_number, SI_NOT_QUERY);
+ si_gfx_wait_fence(sctx, va, sctx->wait_mem_number, 0xffffffff);
}
/* Make sure ME is idle (it executes most packets) before continuing.
* This prevents read-after-write hazards between PFP and ME.
*/
if (cp_coher_cntl ||
- (rctx->flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
+ (flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
SI_CONTEXT_INV_VMEM_L1 |
SI_CONTEXT_INV_GLOBAL_L2 |
SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
*
* SI-CIK don't support L2 write-back.
*/
- if (rctx->flags & SI_CONTEXT_INV_GLOBAL_L2 ||
- (rctx->chip_class <= CIK &&
- (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
+ if (flags & SI_CONTEXT_INV_GLOBAL_L2 ||
+ (sctx->b.chip_class <= CIK &&
+ (flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
/* Invalidate L1 & L2. (L1 is always invalidated on SI)
* WB must be set on VI+ when TC_ACTION is set.
*/
- si_emit_surface_sync(rctx, cp_coher_cntl |
+ si_emit_surface_sync(sctx, cp_coher_cntl |
S_0085F0_TC_ACTION_ENA(1) |
S_0085F0_TCL1_ACTION_ENA(1) |
- S_0301F0_TC_WB_ACTION_ENA(rctx->chip_class >= VI));
+ S_0301F0_TC_WB_ACTION_ENA(sctx->b.chip_class >= VI));
cp_coher_cntl = 0;
sctx->b.num_L2_invalidates++;
} else {
/* L1 invalidation and L2 writeback must be done separately,
* because both operations can't be done together.
*/
- if (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2) {
+ if (flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2) {
/* WB = write-back
* NC = apply to non-coherent MTYPEs
* (i.e. MTYPE <= 1, which is what we use everywhere)
*
* WB doesn't work without NC.
*/
- si_emit_surface_sync(rctx, cp_coher_cntl |
+ si_emit_surface_sync(sctx, cp_coher_cntl |
S_0301F0_TC_WB_ACTION_ENA(1) |
S_0301F0_TC_NC_ACTION_ENA(1));
cp_coher_cntl = 0;
sctx->b.num_L2_writebacks++;
}
- if (rctx->flags & SI_CONTEXT_INV_VMEM_L1) {
+ if (flags & SI_CONTEXT_INV_VMEM_L1) {
/* Invalidate per-CU VMEM L1. */
- si_emit_surface_sync(rctx, cp_coher_cntl |
+ si_emit_surface_sync(sctx, cp_coher_cntl |
S_0085F0_TCL1_ACTION_ENA(1));
cp_coher_cntl = 0;
}
/* If TC flushes haven't cleared this... */
if (cp_coher_cntl)
- si_emit_surface_sync(rctx, cp_coher_cntl);
+ si_emit_surface_sync(sctx, cp_coher_cntl);
- if (rctx->flags & SI_CONTEXT_START_PIPELINE_STATS) {
+ if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
EVENT_INDEX(0));
- } else if (rctx->flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
+ } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
EVENT_INDEX(0));
}
- rctx->flags = 0;
+ sctx->b.flags = 0;
}
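si_emit_cache_flush consumes whatever invalidation bits have accumulated in sctx->b.flags and zeroes them at the end, so callers only queue work. A minimal sketch of that lazy pattern, mirroring how the draw path drives the function; the helper name and the particular flag choice are illustrative, not from the patch:

static void example_flush_before_draw(struct si_context *sctx)
{
   /* Queue the invalidations a previous write made necessary. */
   sctx->b.flags |= SI_CONTEXT_INV_VMEM_L1 |
                    SI_CONTEXT_INV_GLOBAL_L2;

   /* Nothing hits the command stream until a draw actually needs
    * the caches to be coherent. */
   if (sctx->b.flags)
      si_emit_cache_flush(sctx);
}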
static void si_get_draw_start_count(struct si_context *sctx,
while (mask) {
struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];
- atom->emit(&sctx->b, atom);
+ atom->emit(sctx, atom);
}
sctx->dirty_atoms &= skip_atom_mask;
}
}
- si_need_cs_space(sctx);
+ si_need_gfx_cs_space(sctx);
/* Since we've called r600_context_add_resource_size for vertex buffers,
- * this must be called after si_need_cs_space, because we must let
+ * this must be called after si_need_gfx_cs_space, because we must let
/* Set shader pointers after descriptors are uploaded. */
if (si_is_atom_dirty(sctx, shader_pointers))
- shader_pointers->emit(&sctx->b, NULL);
+ shader_pointers->emit(sctx, NULL);
if (si_is_atom_dirty(sctx, &sctx->b.render_cond_atom))
- sctx->b.render_cond_atom.emit(&sctx->b, NULL);
+ sctx->b.render_cond_atom.emit(sctx, NULL);
sctx->dirty_atoms = 0;
si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
void si_trace_emit(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
uint64_t va = sctx->current_saved_cs->trace_buf->gpu_address;
uint32_t trace_id = ++sctx->current_saved_cs->trace_id;