#include "si_pipe.h"
#include "radeon/r600_cs.h"
#include "sid.h"
+#include "gfx9d.h"
#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
}
}
-static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
- const struct pipe_draw_info *info,
- unsigned num_patches)
+static unsigned
+si_get_init_multi_vgt_param(struct si_screen *sscreen,
+ union si_vgt_param_key *key)
{
- struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
- unsigned prim = info->mode;
- unsigned primgroup_size = 128; /* recommended without a GS */
+ STATIC_ASSERT(sizeof(union si_vgt_param_key) == 4);
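+	/* Everything that affects IA_MULTI_VGT_PARAM (except PRIMGROUP_SIZE)
+	 * is packed into this 32-bit key, so the register value can be
+	 * precomputed for every key and looked up at draw time. */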
unsigned max_primgroup_in_wave = 2;
	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
bool partial_vs_wave = false;
bool partial_es_wave = false;
- if (sctx->gs_shader.cso)
- primgroup_size = 64; /* recommended with a GS */
-
- if (sctx->tes_shader.cso) {
- /* primgroup_size must be set to a multiple of NUM_PATCHES */
- primgroup_size = num_patches;
-
+ if (key->u.uses_tess) {
/* SWITCH_ON_EOI must be set if PrimID is used. */
- if ((sctx->tcs_shader.cso && sctx->tcs_shader.cso->info.uses_primid) ||
- sctx->tes_shader.cso->info.uses_primid)
+ if (key->u.tcs_tes_uses_prim_id)
ia_switch_on_eoi = true;
/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
- if ((sctx->b.family == CHIP_TAHITI ||
- sctx->b.family == CHIP_PITCAIRN ||
- sctx->b.family == CHIP_BONAIRE) &&
- sctx->gs_shader.cso)
+ if ((sscreen->b.family == CHIP_TAHITI ||
+ sscreen->b.family == CHIP_PITCAIRN ||
+ sscreen->b.family == CHIP_BONAIRE) &&
+ key->u.uses_gs)
partial_vs_wave = true;
/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
- if (sctx->screen->has_distributed_tess) {
- if (sctx->gs_shader.cso) {
+ if (sscreen->has_distributed_tess) {
+ if (key->u.uses_gs) {
partial_es_wave = true;
/* GPU hang workaround. */
- if (sctx->b.family == CHIP_TONGA ||
- sctx->b.family == CHIP_FIJI ||
- sctx->b.family == CHIP_POLARIS10 ||
- sctx->b.family == CHIP_POLARIS11)
+ if (sscreen->b.family == CHIP_TONGA ||
+ sscreen->b.family == CHIP_FIJI ||
+ sscreen->b.family == CHIP_POLARIS10 ||
+ sscreen->b.family == CHIP_POLARIS11)
partial_vs_wave = true;
} else {
partial_vs_wave = true;
}
/* This is a hardware requirement. */
- if ((rs && rs->line_stipple_enable) ||
- (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
+ if (key->u.line_stipple_enabled ||
+ (sscreen->b.debug_flags & DBG_SWITCH_ON_EOP)) {
ia_switch_on_eop = true;
wd_switch_on_eop = true;
}
- if (sctx->b.chip_class >= CIK) {
+ if (sscreen->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with fewer than
		 * 4 shader engines. Set it to 1 to pass the assertion below.
* The other cases are hardware requirements.
* Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
* for points, line strips, and tri strips.
*/
- if (sctx->b.screen->info.max_se < 4 ||
- prim == PIPE_PRIM_POLYGON ||
- prim == PIPE_PRIM_LINE_LOOP ||
- prim == PIPE_PRIM_TRIANGLE_FAN ||
- prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
- (info->primitive_restart &&
- (sctx->b.family < CHIP_POLARIS10 ||
- (prim != PIPE_PRIM_POINTS &&
- prim != PIPE_PRIM_LINE_STRIP &&
- prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
- info->count_from_stream_output)
+ if (sscreen->b.info.max_se < 4 ||
+ key->u.prim == PIPE_PRIM_POLYGON ||
+ key->u.prim == PIPE_PRIM_LINE_LOOP ||
+ key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
+ key->u.prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
+ (key->u.primitive_restart &&
+ (sscreen->b.family < CHIP_POLARIS10 ||
+ (key->u.prim != PIPE_PRIM_POINTS &&
+ key->u.prim != PIPE_PRIM_LINE_STRIP &&
+ key->u.prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
+ key->u.count_from_stream_output)
wd_switch_on_eop = true;
/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
* We don't know that for indirect drawing, so treat it as
* always problematic. */
- if (sctx->b.family == CHIP_HAWAII &&
- (info->indirect || info->instance_count > 1))
+ if (sscreen->b.family == CHIP_HAWAII &&
+ key->u.uses_instancing)
wd_switch_on_eop = true;
		/* Performance recommendation for 4 SE Gfx7-8 parts if
		 * instances are smaller than a primgroup.
* Assume indirect draws always use small instances.
* This is needed for good VS wave utilization.
*/
- if (sctx->b.chip_class <= VI &&
- sctx->b.screen->info.max_se >= 4 &&
- (info->indirect ||
- (info->instance_count > 1 &&
- si_num_prims_for_vertices(info) < primgroup_size)))
+ if (sscreen->b.chip_class <= VI &&
+ sscreen->b.info.max_se == 4 &&
+ key->u.multi_instances_smaller_than_primgroup)
wd_switch_on_eop = true;
/* Required on CIK and later. */
- if (sctx->b.screen->info.max_se > 2 && !wd_switch_on_eop)
+ if (sscreen->b.info.max_se > 2 && !wd_switch_on_eop)
ia_switch_on_eoi = true;
/* Required by Hawaii and, for some special cases, by VI. */
if (ia_switch_on_eoi &&
- (sctx->b.family == CHIP_HAWAII ||
- (sctx->b.chip_class == VI &&
- (sctx->gs_shader.cso || max_primgroup_in_wave != 2))))
+ (sscreen->b.family == CHIP_HAWAII ||
+ (sscreen->b.chip_class == VI &&
+ (key->u.uses_gs || max_primgroup_in_wave != 2))))
partial_vs_wave = true;
/* Instancing bug on Bonaire. */
- if (sctx->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
- (info->indirect || info->instance_count > 1))
+ if (sscreen->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
+ key->u.uses_instancing)
partial_vs_wave = true;
- /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
- * The hw doc says all multi-SE chips are affected, but Vulkan
- * only applies it to Hawaii. Do what Vulkan does.
- */
- if (sctx->b.family == CHIP_HAWAII &&
- sctx->gs_shader.cso &&
- ia_switch_on_eoi &&
- (info->indirect ||
- (info->instance_count > 1 &&
- si_num_prims_for_vertices(info) <= 1)))
- sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
-
-
/* If the WD switch is false, the IA switch must be false too. */
assert(wd_switch_on_eop || !ia_switch_on_eop);
}
if (ia_switch_on_eoi)
partial_es_wave = true;
- /* GS requirement. */
- if (SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
- partial_es_wave = true;
-
return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
- S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
- S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
- S_028AA8_MAX_PRIMGRP_IN_WAVE(sctx->b.chip_class >= VI ?
- max_primgroup_in_wave : 0);
+ S_028AA8_WD_SWITCH_ON_EOP(sscreen->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
+ S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->b.chip_class >= VI ?
+ max_primgroup_in_wave : 0) |
+ S_030960_EN_INST_OPT_BASIC(sscreen->b.chip_class >= GFX9) |
+ S_030960_EN_INST_OPT_ADV(sscreen->b.chip_class >= GFX9);
}
-static void si_emit_scratch_reloc(struct si_context *sctx)
+void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
{
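+	/* Walk every possible key value and precompute IA_MULTI_VGT_PARAM,
+	 * so that draw calls only need a table lookup. */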
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ for (int prim = 0; prim <= R600_PRIM_RECTANGLE_LIST; prim++)
+ for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
+ for (int multi_instances = 0; multi_instances < 2; multi_instances++)
+ for (int primitive_restart = 0; primitive_restart < 2; primitive_restart++)
+ for (int count_from_so = 0; count_from_so < 2; count_from_so++)
+ for (int line_stipple = 0; line_stipple < 2; line_stipple++)
+ for (int uses_tess = 0; uses_tess < 2; uses_tess++)
+ for (int tess_uses_primid = 0; tess_uses_primid < 2; tess_uses_primid++)
+ for (int uses_gs = 0; uses_gs < 2; uses_gs++) {
+ union si_vgt_param_key key;
+
+ key.index = 0;
+ key.u.prim = prim;
+ key.u.uses_instancing = uses_instancing;
+ key.u.multi_instances_smaller_than_primgroup = multi_instances;
+ key.u.primitive_restart = primitive_restart;
+ key.u.count_from_stream_output = count_from_so;
+ key.u.line_stipple_enabled = line_stipple;
+ key.u.uses_tess = uses_tess;
+ key.u.tcs_tes_uses_prim_id = tess_uses_primid;
+ key.u.uses_gs = uses_gs;
+
+ sctx->ia_multi_vgt_param[key.index] =
+ si_get_init_multi_vgt_param(sctx->screen, &key);
+ }
+}
- if (!sctx->emit_scratch_reloc)
- return;
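+/* Look up the precomputed IA_MULTI_VGT_PARAM value and OR in the fields
+ * that depend on the current draw and thus can't live in the table. */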
+static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
+ const struct pipe_draw_info *info,
+ unsigned num_patches)
+{
+ union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
+ unsigned primgroup_size;
+ unsigned ia_multi_vgt_param;
- radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
- sctx->spi_tmpring_size);
+ if (sctx->tes_shader.cso) {
+ primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
+ } else if (sctx->gs_shader.cso) {
+ primgroup_size = 64; /* recommended with a GS */
+ } else {
+ primgroup_size = 128; /* recommended without a GS and tess */
+ }
- if (sctx->scratch_buffer) {
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- sctx->scratch_buffer, RADEON_USAGE_READWRITE,
- RADEON_PRIO_SCRATCH_BUFFER);
+ key.u.prim = info->mode;
+ key.u.uses_instancing = info->indirect || info->instance_count > 1;
+ key.u.multi_instances_smaller_than_primgroup =
+ info->indirect ||
+ (info->instance_count > 1 &&
+ (info->count_from_stream_output ||
+ si_num_prims_for_vertices(info) < primgroup_size));
+ key.u.primitive_restart = info->primitive_restart;
+ key.u.count_from_stream_output = info->count_from_stream_output != NULL;
+
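+	/* PRIMGROUP_SIZE varies with num_patches and the bound shaders, so it
+	 * is ORed in here instead of being baked into the table. */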
+ ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index] |
+ S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);
+
+ if (sctx->gs_shader.cso) {
+ /* GS requirement. */
+ if (SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
+ ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);
+ /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
+ * The hw doc says all multi-SE chips are affected, but Vulkan
+ * only applies it to Hawaii. Do what Vulkan does.
+ */
+ if (sctx->b.family == CHIP_HAWAII &&
+ G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
+ (info->indirect ||
+ (info->instance_count > 1 &&
+ (info->count_from_stream_output ||
+ si_num_prims_for_vertices(info) <= 1))))
+ sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;
}
- sctx->emit_scratch_reloc = false;
+
+ return ia_multi_vgt_param;
}
/* rast_prim is the primitive type after GS. */
unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
unsigned ia_multi_vgt_param, num_patches = 0;
- /* Polaris needs different VTX_REUSE_DEPTH settings depending on
- * whether the "fractional odd" tessellation spacing is used.
- */
- if (sctx->b.family >= CHIP_POLARIS10) {
- struct si_shader_selector *tes = sctx->tes_shader.cso;
- unsigned vtx_reuse_depth = 30;
-
- if (tes &&
- tes->info.properties[TGSI_PROPERTY_TES_SPACING] ==
- PIPE_TESS_SPACING_FRACTIONAL_ODD)
- vtx_reuse_depth = 14;
-
- if (vtx_reuse_depth != sctx->last_vtx_reuse_depth) {
- radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
- vtx_reuse_depth);
- sctx->last_vtx_reuse_depth = vtx_reuse_depth;
- }
- }
-
if (sctx->tes_shader.cso)
si_emit_derived_tess_state(sctx, info, &num_patches);
/* Draw state. */
if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
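+		/* On GFX9, IA_MULTI_VGT_PARAM lives in the UCONFIG register
+		 * space (R_030960) and is written with SET_UCONFIG_REG rather
+		 * than a context register packet. */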
- if (sctx->b.chip_class >= CIK)
+ if (sctx->b.chip_class >= GFX9)
+			radeon_set_uconfig_reg_idx(cs, R_030960_IA_MULTI_VGT_PARAM,
+						   4, ia_multi_vgt_param);
+ else if (sctx->b.chip_class >= CIK)
radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
else
radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
/* Primitive restart. */
if (info->primitive_restart != sctx->last_primitive_restart_en) {
- radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
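+		/* This is likewise a UCONFIG register on GFX9 (R_03092C). */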
+ if (sctx->b.chip_class >= GFX9)
+ radeon_set_uconfig_reg(cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
+ info->primitive_restart);
+ else
+ radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
+ info->primitive_restart);
+
sctx->last_primitive_restart_en = info->primitive_restart;
}
/* draw packet */
if (info->indexed) {
if (ib->index_size != sctx->last_index_size) {
- radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
+ unsigned index_type;
/* index type */
switch (ib->index_size) {
case 1:
- radeon_emit(cs, V_028A7C_VGT_INDEX_8);
+ index_type = V_028A7C_VGT_INDEX_8;
break;
case 2:
- radeon_emit(cs, V_028A7C_VGT_INDEX_16 |
- (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
- V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
+ index_type = V_028A7C_VGT_INDEX_16 |
+ (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
+ V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
break;
case 4:
- radeon_emit(cs, V_028A7C_VGT_INDEX_32 |
- (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
- V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
+ index_type = V_028A7C_VGT_INDEX_32 |
+ (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
+ V_028A7C_VGT_DMA_SWAP_32_BIT : 0);
break;
default:
assert(!"unreachable");
return;
}
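+
+			/* GFX9 programs the index type through the
+			 * VGT_INDEX_TYPE UCONFIG register; older chips use the
+			 * INDEX_TYPE packet. */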
+ if (sctx->b.chip_class >= GFX9) {
+ radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
+ 2, index_type);
+ } else {
+ radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
+ radeon_emit(cs, index_type);
+ }
+
sctx->last_index_size = ib->index_size;
}
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
radeon_emit(cs, index_max_size);
radeon_emit(cs, index_va);
- radeon_emit(cs, (index_va >> 32UL) & 0xFF);
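+		/* Emit the full upper address dword; the old 0xFF mask assumed
+		 * a 40-bit virtual address space. */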
+ radeon_emit(cs, index_va >> 32);
radeon_emit(cs, info->count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
} else {
{
struct radeon_winsys_cs *cs = rctx->gfx.cs;
- /* ACQUIRE_MEM is only required on a compute ring. */
- radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
- radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
- radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
- radeon_emit(cs, 0); /* CP_COHER_BASE */
- radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
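+	/* GFX9 replaces SURFACE_SYNC with ACQUIRE_MEM, which adds high dwords
+	 * for CP_COHER_SIZE and CP_COHER_BASE. */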
+ if (rctx->chip_class >= GFX9) {
+ /* Flush caches and wait for the caches to assert idle. */
+ radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
+ radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
+ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
+ radeon_emit(cs, 0xffffff); /* CP_COHER_SIZE_HI */
+ radeon_emit(cs, 0); /* CP_COHER_BASE */
+ radeon_emit(cs, 0); /* CP_COHER_BASE_HI */
+ radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
+ } else {
+ /* ACQUIRE_MEM is only required on a compute ring. */
+ radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
+ radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
+ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
+ radeon_emit(cs, 0); /* CP_COHER_BASE */
+ radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
+ }
}
void si_emit_cache_flush(struct si_context *sctx)
struct radeon_winsys_cs *cs = rctx->gfx.cs;
uint32_t cp_coher_cntl = 0;
- if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER)
+ if (rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
+ SI_CONTEXT_FLUSH_AND_INV_DB))
sctx->b.num_fb_cache_flushes++;
/* SI has a bug that it always flushes ICACHE and KCACHE if either
if (rctx->chip_class == VI)
r600_gfx_write_event_eop(rctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
0, 0, NULL, 0, 0, 0);
+
+ /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
}
if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
S_0085F0_DB_DEST_BASE_ENA(1);
- }
- if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
- radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
- /* needed for wait for idle in SURFACE_SYNC */
- assert(rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB);
- }
- if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
+ /* Flush HTILE. SURFACE_SYNC will wait for idle. */
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
- /* needed for wait for idle in SURFACE_SYNC */
- assert(rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB);
}
/* Wait for shader engines to go idle.
unsigned *start, unsigned *count)
{
if (info->indirect) {
- struct r600_resource *indirect =
- (struct r600_resource*)info->indirect;
- int *data = r600_buffer_map_sync_with_rings(&sctx->b,
- indirect, PIPE_TRANSFER_READ);
- data += info->indirect_offset/sizeof(int);
- *start = data[2];
- *count = data[0];
+ unsigned indirect_count;
+ struct pipe_transfer *transfer;
+ unsigned begin, end;
+ unsigned map_size;
+ unsigned *data;
+
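+		/* With ARB_indirect_parameters, the draw count itself comes
+		 * from a buffer and has to be read back first. */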
+ if (info->indirect_params) {
+ data = pipe_buffer_map_range(&sctx->b.b,
+ info->indirect_params,
+ info->indirect_params_offset,
+ sizeof(unsigned),
+ PIPE_TRANSFER_READ, &transfer);
+
+ indirect_count = *data;
+
+ pipe_buffer_unmap(&sctx->b.b, transfer);
+ } else {
+ indirect_count = info->indirect_count;
+ }
+
+ if (!indirect_count) {
+ *start = *count = 0;
+ return;
+ }
+
+ map_size = (indirect_count - 1) * info->indirect_stride + 3 * sizeof(unsigned);
+ data = pipe_buffer_map_range(&sctx->b.b, info->indirect,
+ info->indirect_offset, map_size,
+ PIPE_TRANSFER_READ, &transfer);
+
+ begin = UINT_MAX;
+ end = 0;
+
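+		/* Accumulate the union of the vertex ranges referenced by all
+		 * draw records; empty records (count == 0) are skipped. */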
+ for (unsigned i = 0; i < indirect_count; ++i) {
+ unsigned count = data[0];
+ unsigned start = data[2];
+
+ if (count > 0) {
+ begin = MIN2(begin, start);
+ end = MAX2(end, start + count);
+ }
+
+ data += info->indirect_stride / sizeof(unsigned);
+ }
+
+ pipe_buffer_unmap(&sctx->b.b, transfer);
+
+ if (begin < end) {
+ *start = begin;
+ *count = end - begin;
+ } else {
+ *start = *count = 0;
+ }
} else {
*start = info->start;
*count = info->count;
}
}
-static void cik_prefetch_shader_async(struct si_context *sctx,
- struct si_pm4_state *state)
-{
- if (state) {
- struct pipe_resource *bo = &state->bo[0]->b.b;
- assert(state->nbo == 1);
-
- cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
- }
-}
-
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
struct pipe_index_buffer ib = {};
- unsigned mask, dirty_fb_counter, dirty_tex_counter, rast_prim;
+ unsigned mask, dirty_tex_counter, rast_prim;
if (likely(!info->indirect)) {
/* SI-CI treat instance_count==0 as instance_count==1. There is
return;
}
- /* Re-emit the framebuffer state if needed. */
- dirty_fb_counter = p_atomic_read(&sctx->b.screen->dirty_fb_counter);
- if (unlikely(dirty_fb_counter != sctx->b.last_dirty_fb_counter)) {
- sctx->b.last_dirty_fb_counter = dirty_fb_counter;
+ /* Recompute and re-emit the texture resource states if needed. */
+ dirty_tex_counter = p_atomic_read(&sctx->b.screen->dirty_tex_counter);
+ if (unlikely(dirty_tex_counter != sctx->b.last_dirty_tex_counter)) {
+ sctx->b.last_dirty_tex_counter = dirty_tex_counter;
sctx->framebuffer.dirty_cbufs |=
((1 << sctx->framebuffer.state.nr_cbufs) - 1);
sctx->framebuffer.dirty_zsbuf = true;
sctx->framebuffer.do_update_surf_dirtiness = true;
si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
- }
-
- /* Invalidate & recompute texture descriptors if needed. */
- dirty_tex_counter = p_atomic_read(&sctx->b.screen->dirty_tex_descriptor_counter);
- if (unlikely(dirty_tex_counter != sctx->b.last_dirty_tex_descriptor_counter)) {
- sctx->b.last_dirty_tex_descriptor_counter = dirty_tex_counter;
si_update_all_texture_descriptors(sctx);
}
/* 8-bit indices are supported on VI. */
if (sctx->b.chip_class <= CIK && ib.index_size == 1) {
struct pipe_resource *out_buffer = NULL;
- unsigned out_offset, start, count, start_offset;
+ unsigned out_offset, start, count, start_offset, size;
void *ptr;
si_get_draw_start_count(sctx, info, &start, &count);
- start_offset = start * ib.index_size;
+ start_offset = start * 2;
+ size = count * 2;
- u_upload_alloc(sctx->b.uploader, start_offset, count * 2, 256,
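+			/* Pick an upload alignment that is friendly to TCC
+			 * (L2) cache lines; see si_optimal_tcc_alignment. */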
+ u_upload_alloc(ctx->stream_uploader, start_offset,
+ size,
+ si_optimal_tcc_alignment(sctx, size),
&out_offset, &out_buffer, &ptr);
if (!out_buffer) {
pipe_resource_reference(&ib.buffer, NULL);
return;
}
- util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0,
- ib.offset + start_offset,
+ util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0, 0,
+ ib.offset + start,
count, ptr);
pipe_resource_reference(&ib.buffer, NULL);
si_get_draw_start_count(sctx, info, &start, &count);
start_offset = start * ib.index_size;
- u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
- 256, (char*)ib.user_buffer + start_offset,
+ u_upload_data(ctx->stream_uploader, start_offset,
+ count * ib.index_size,
+ sctx->screen->b.info.tcc_cache_line_size,
+ (char*)ib.user_buffer + start_offset,
&ib.offset, &ib.buffer);
if (!ib.buffer)
return;
/* info->start will be added by the drawing code */
ib.offset -= start_offset;
+ } else if (sctx->b.chip_class <= CIK &&
+ r600_resource(ib.buffer)->TC_L2_dirty) {
+ /* VI reads index buffers through TC L2, so it doesn't
+ * need this. */
+ sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+ r600_resource(ib.buffer)->TC_L2_dirty = false;
}
}
- /* VI reads index buffers through TC L2. */
- if (info->indexed && sctx->b.chip_class <= CIK &&
- r600_resource(ib.buffer)->TC_L2_dirty) {
- sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(ib.buffer)->TC_L2_dirty = false;
- }
+ if (info->indirect) {
+ /* Add the buffer size for memory checking in need_cs_space. */
+ r600_context_add_resource_size(ctx, info->indirect);
- if (info->indirect && r600_resource(info->indirect)->TC_L2_dirty) {
- sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(info->indirect)->TC_L2_dirty = false;
- }
+ if (r600_resource(info->indirect)->TC_L2_dirty) {
+ sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+ r600_resource(info->indirect)->TC_L2_dirty = false;
+ }
- if (info->indirect_params &&
- r600_resource(info->indirect_params)->TC_L2_dirty) {
- sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(info->indirect_params)->TC_L2_dirty = false;
+ if (info->indirect_params &&
+ r600_resource(info->indirect_params)->TC_L2_dirty) {
+ sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+ r600_resource(info->indirect_params)->TC_L2_dirty = false;
+ }
}
- /* Add buffer sizes for memory checking in need_cs_space. */
- if (sctx->emit_scratch_reloc && sctx->scratch_buffer)
- r600_context_add_resource_size(ctx, &sctx->scratch_buffer->b.b);
- if (info->indirect)
- r600_context_add_resource_size(ctx, info->indirect);
-
si_need_cs_space(sctx);
/* Since we've called r600_context_add_resource_size for vertex buffers,
if (!si_upload_vertex_buffer_descriptors(sctx))
return;
- /* Flushed caches prior to prefetching shaders. */
+ /* GFX9 scissor bug workaround. There is also a more efficient but
+ * more involved alternative workaround. */
+ if (sctx->b.chip_class == GFX9 &&
+ si_is_atom_dirty(sctx, &sctx->b.scissors.atom))
+ sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
+
+ /* Flush caches before the first state atom, which does L2 prefetches. */
if (sctx->b.flags)
si_emit_cache_flush(sctx);
- /* Prefetch shaders and VBO descriptors to TC L2. */
- if (sctx->b.chip_class >= CIK) {
- if (si_pm4_state_changed(sctx, ls))
- cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
- if (si_pm4_state_changed(sctx, hs))
- cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
- if (si_pm4_state_changed(sctx, es))
- cik_prefetch_shader_async(sctx, sctx->queued.named.es);
- if (si_pm4_state_changed(sctx, gs))
- cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
- if (si_pm4_state_changed(sctx, vs))
- cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
-
- /* Vertex buffer descriptors are uploaded uncached, so prefetch
- * them right after the VS binary. */
- if (sctx->vertex_buffer_pointer_dirty) {
- cik_prefetch_TC_L2_async(sctx, &sctx->vertex_buffers.buffer->b.b,
- sctx->vertex_buffers.buffer_offset,
- sctx->vertex_elements->count * 16);
- }
- if (si_pm4_state_changed(sctx, ps))
- cik_prefetch_shader_async(sctx, sctx->queued.named.ps);
- }
-
- /* Emit states. */
+ /* Emit state atoms. */
mask = sctx->dirty_atoms;
while (mask) {
struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];
}
sctx->dirty_atoms = 0;
- si_pm4_emit_dirty(sctx);
- si_emit_scratch_reloc(sctx);
+ /* Emit states. */
+ mask = sctx->dirty_states;
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+ struct si_pm4_state *state = sctx->queued.array[i];
+
+ if (!state || sctx->emitted.array[i] == state)
+ continue;
+
+ si_pm4_emit(sctx, state);
+ sctx->emitted.array[i] = state;
+ }
+ sctx->dirty_states = 0;
+
si_emit_rasterizer_prim_state(sctx);
si_emit_draw_registers(sctx, info);
si_ce_pre_draw_synchronization(sctx);
-
si_emit_draw_packets(sctx, info, &ib);
-
si_ce_post_draw_synchronization(sctx);
if (sctx->trace_buf)