/* GRBM_GFX_INDEX has a different offset on SI and CI+ */
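/* (On SI it lives in the CONFIG register space at 0x802C, while CIK+
 * moved it to the UCONFIG space at 0x30800, hence the two write paths.) */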
if (physical_device->rad_info.chip_class < CIK)
- radeon_set_config_reg(cs, GRBM_GFX_INDEX,
- SE_INDEX(se) | SH_BROADCAST_WRITES |
- INSTANCE_BROADCAST_WRITES);
+ radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
+ S_00802C_SE_INDEX(se) |
+ S_00802C_SH_BROADCAST_WRITES(1) |
+ S_00802C_INSTANCE_BROADCAST_WRITES(1));
else
radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
S_030800_SE_INDEX(se) | S_030800_SH_BROADCAST_WRITES(1) |
S_030800_INSTANCE_BROADCAST_WRITES(1));
/* GRBM_GFX_INDEX has a different offset on SI and CI+ */
if (physical_device->rad_info.chip_class < CIK)
- radeon_set_config_reg(cs, GRBM_GFX_INDEX,
- SE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
- INSTANCE_BROADCAST_WRITES);
+ radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,
+ S_00802C_SE_BROADCAST_WRITES(1) |
+ S_00802C_SH_BROADCAST_WRITES(1) |
+ S_00802C_INSTANCE_BROADCAST_WRITES(1));
else
radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,
S_030800_SE_BROADCAST_WRITES(1) | S_030800_SH_BROADCAST_WRITES(1) |
S_030800_INSTANCE_BROADCAST_WRITES(1));
switch (physical_device->rad_info.family) {
case CHIP_VEGA10:
+ case CHIP_VEGA12:
pc_lines = 4096;
break;
case CHIP_RAVEN:
pc_lines = 1024;
break;
default:
assert(0);
}
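/* pc_lines is presumably the parameter-cache line count for the family;
 * it feeds the binner MAX_ALLOC_COUNT computation below. */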
- radeon_set_context_reg(cs, R_028060_DB_DFSM_CONTROL,
- S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF));
- /* TODO: Enable the binner: */
- radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0,
- S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
- S_028C44_DISABLE_START_OF_PRIM(1));
radeon_set_context_reg(cs, R_028C48_PA_SC_BINNER_CNTL_1,
S_028C48_MAX_ALLOC_COUNT(MIN2(128, pc_lines / (4 * num_se))) |
S_028C48_MAX_PRIM_PER_BATCH(1023));
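/* MAX_ALLOC_COUNT is capped at 128 and otherwise scales with the
 * parameter-cache lines available per SE (the 4x divisor is assumed to
 * reflect how those lines are split up within an SE). */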
S_028004_ZPASS_INCREMENT_DISABLE(1));
}
+ /* Enable the Polaris small primitive filter control.
+ * XXX: There is possibly an issue when MSAA is off (see the RadeonSI
+ * has_msaa_sample_loc_bug flag). But this doesn't seem to regress
+ * anything, and AMDVLK doesn't have a workaround for it either.
+ */
+ if (physical_device->rad_info.family >= CHIP_POLARIS10) {
+ unsigned small_prim_filter_cntl =
+ S_028830_SMALL_PRIM_FILTER_ENABLE(1) |
+ /* Workaround for a hw line bug. */
+ S_028830_LINE_FILTER_DISABLE(physical_device->rad_info.family <= CHIP_POLARIS12);
+
+ radeon_set_context_reg(cs, R_028830_PA_SU_SMALL_PRIM_FILTER_CNTL,
+ small_prim_filter_cntl);
+ }
+
si_emit_compute(physical_device, cs);
}
device->gfx_init = device->ws->buffer_create(device->ws,
cs->cdw * 4, 4096,
RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS);
+ RADEON_FLAG_CPU_ACCESS |
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ RADEON_FLAG_READ_ONLY);
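+ /* The init CS is only read by the GPU and never shared across
+ * processes, so READ_ONLY and NO_INTERPROCESS_SHARING apply; the
+ * latter is assumed to let the winsys treat it as a local BO. */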
if (!device->gfx_init)
goto fail;
int i;
float scale[3], translate[3], guardband_x = INFINITY, guardband_y = INFINITY;
const float max_range = 32767.0f;
- assert(count);
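+ /* A count of zero is legal for the caller, so return early rather
+ * than assert; emitting a zero-length register sequence below would
+ * be invalid. */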
+ if (!count)
+ return;
radeon_set_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL + first * 4 * 2, count * 2);
for (i = 0; i < count; i++) {
bool ia_switch_on_eop = false;
bool ia_switch_on_eoi = false;
bool partial_vs_wave = false;
- bool partial_es_wave = cmd_buffer->state.pipeline->graphics.partial_es_wave;
+ bool partial_es_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_es_wave;
bool multi_instances_smaller_than_primgroup;
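/* For indirect draws the primitive count is unknown on the CPU, so
 * conservatively assume the instances are smaller than the primgroup. */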
multi_instances_smaller_than_primgroup = indirect_draw;
if (!multi_instances_smaller_than_primgroup && instanced_draw) {
uint32_t num_prims = radv_prims_for_vertices(&cmd_buffer->state.pipeline->graphics.prim_vertex_count, draw_vertex_count);
- if (num_prims < cmd_buffer->state.pipeline->graphics.primgroup_size)
+ if (num_prims < cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.primgroup_size)
multi_instances_smaller_than_primgroup = true;
}
- ia_switch_on_eoi = cmd_buffer->state.pipeline->graphics.ia_switch_on_eoi;
- partial_vs_wave = cmd_buffer->state.pipeline->graphics.partial_vs_wave;
+ ia_switch_on_eoi = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.ia_switch_on_eoi;
+ partial_vs_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_vs_wave;
if (chip_class >= CIK) {
- wd_switch_on_eop = cmd_buffer->state.pipeline->graphics.wd_switch_on_eop;
+ wd_switch_on_eop = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.wd_switch_on_eop;
/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
* We don't know that for indirect drawing, so treat it as
* always problematic. */
}
}
- return cmd_buffer->state.pipeline->graphics.base_ia_multi_vgt_param |
+ return cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.base |
S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
S_028AA8_WD_SWITCH_ON_EOP(chip_class >= CIK ? wd_switch_on_eop : 0);
}
void
si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
- bool predicated,
enum chip_class chip_class,
uint32_t *flush_cnt,
uint64_t flush_va,
bool is_mec,
enum radv_cmd_flush_bits flush_bits)
/* Necessary for DCC */
if (chip_class >= VI) {
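/* (Presumably because DCC metadata is only written back by the CB data
 * timestamp event, a plain CB flush event is not sufficient on VI+.) */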
si_cs_emit_write_event_eop(cs,
- predicated,
+ false,
chip_class,
is_mec,
V_028A90_FLUSH_AND_INV_CB_DATA_TS,
0, 0, 0, 0, 0);
}
if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
}
if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
}
}
if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
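/* CS_PARTIAL_FLUSH waits for all previously dispatched compute work to
 * finish (EVENT_INDEX(4) is the partial-flush event class). */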
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
if (chip_class >= GFX9 && flush_cb_db) {
unsigned cb_db_event, tc_flags;
+#if 0
+ /* This breaks a bunch of the
+ * dEQP-VK.renderpass.dedicated_allocation.formats.d32_sfloat_s8_uint.input*
+ * tests, so always use the big hammer (flush and invalidate both CB
+ * and DB) instead.
+ */
/* Set the CB/DB flush event. */
switch (flush_cb_db) {
case RADV_CMD_FLAG_FLUSH_AND_INV_CB:
cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
break;
case RADV_CMD_FLAG_FLUSH_AND_INV_DB:
cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
break;
default:
/* both CB & DB */
cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
}
-
- /* TC | TC_WB = invalidate L2 data
- * TC_MD | TC_WB = invalidate L2 metadata
- * TC | TC_WB | TC_MD = invalidate L2 data & metadata
- *
- * The metadata cache must always be invalidated for coherency
- * between CB/DB and shaders. (metadata = HTILE, CMASK, DCC)
+#else
+ cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
+#endif
+ /* These are the only allowed combinations. If you need to
+ * do multiple operations at once, do them separately.
+ * All operations that invalidate L2 also seem to invalidate
+ * metadata. Volatile (VOL) and WC flushes are not listed here.
*
- * TC must be invalidated on GFX9 only if the CB/DB surface is
- * not pipe-aligned. If the surface is RB-aligned, it might not
- * strictly be pipe-aligned since RB alignment takes precendence.
+ * TC    | TC_WB         = writeback & invalidate L2 & L1
+ * TC    | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
+ *         TC_WB | TC_NC = writeback L2 for MTYPE == NC
+ * TC    |         TC_NC = invalidate L2 for MTYPE == NC
+ * TC    | TC_MD         = writeback & invalidate L2 metadata (DCC, etc.)
+ *         TCL1          = invalidate L1
*/
- tc_flags = EVENT_TC_WB_ACTION_ENA |
- EVENT_TC_MD_ACTION_ENA;
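+ /* Default: write back & invalidate the L2 metadata only; the metadata
+ * cache must always be invalidated for coherency between CB/DB and
+ * shaders (metadata = HTILE, CMASK, DCC). */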
+ tc_flags = EVENT_TC_ACTION_ENA |
+ EVENT_TC_MD_ACTION_ENA;
/* Ideally flush TC together with CB/DB. */
if (flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) {
- tc_flags |= EVENT_TC_ACTION_ENA |
- EVENT_TCL1_ACTION_ENA;
+ /* Writeback and invalidate everything in L2 & L1. */
+ tc_flags = EVENT_TC_ACTION_ENA |
+ EVENT_TC_WB_ACTION_ENA;
+
/* Clear the flags. */
flush_bits &= ~(RADV_CMD_FLAG_INV_GLOBAL_L2 |
RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2 |
RADV_CMD_FLAG_INV_VMEM_L1);
assert(flush_cnt);
uint32_t old_fence = (*flush_cnt)++;
- si_cs_emit_write_event_eop(cs, predicated, chip_class, false, cb_db_event, tc_flags, 1,
+ si_cs_emit_write_event_eop(cs, false, chip_class, false, cb_db_event, tc_flags, 1,
flush_va, old_fence, *flush_cnt);
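/* Block the CP until the new fence value has landed in memory. */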
- si_emit_wait_fence(cs, predicated, flush_va, *flush_cnt, 0xffffffff);
+ si_emit_wait_fence(cs, false, flush_va, *flush_cnt, 0xffffffff);
}
/* VGT state sync */
if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
- radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
}
RADV_CMD_FLAG_INV_GLOBAL_L2 |
RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) &&
!is_mec) {
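/* PFP_SYNC_ME stalls the prefetch parser until the ME catches up, so
 * it cannot fetch stale data ahead of the flush; compute rings have no
 * PFP, hence the !is_mec check. */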
- radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, predicated));
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
radeon_emit(cs, 0);
}
if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
(chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
- si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9,
+ si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9,
cp_coher_cntl |
S_0085F0_TC_ACTION_ENA(1) |
S_0085F0_TCL1_ACTION_ENA(1) |
S_0301F0_TC_WB_ACTION_ENA(chip_class >= VI));
cp_coher_cntl = 0;
*
* WB doesn't work without NC.
*/
- si_emit_acquire_mem(cs, is_mec, predicated,
+ si_emit_acquire_mem(cs, is_mec, false,
chip_class >= GFX9,
cp_coher_cntl |
S_0301F0_TC_WB_ACTION_ENA(1) |
S_0301F0_TC_NC_ACTION_ENA(1));
cp_coher_cntl = 0;
}
if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) {
si_emit_acquire_mem(cs, is_mec,
- predicated, chip_class >= GFX9,
+ false, chip_class >= GFX9,
cp_coher_cntl |
S_0085F0_TCL1_ACTION_ENA(1));
cp_coher_cntl = 0;
* Therefore, it should be last. Done in PFP.
*/
if (cp_coher_cntl)
- si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9, cp_coher_cntl);
+ si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9, cp_coher_cntl);
}
void
ptr = &cmd_buffer->gfx9_fence_idx;
}
si_cs_emit_cache_flush(cmd_buffer->cs,
- cmd_buffer->state.predicating,
cmd_buffer->device->physical_device->rad_info.chip_class,
ptr, va,
radv_cmd_buffer_uses_mec(cmd_buffer),
cmd_buffer->state.flush_bits);
- radv_cmd_buffer_trace_emit(cmd_buffer);
+ if (unlikely(cmd_buffer->device->trace_bo))
+ radv_cmd_buffer_trace_emit(cmd_buffer);
+
cmd_buffer->state.flush_bits = 0;
}
radeon_emit(cs, 0);
}
- radv_cmd_buffer_trace_emit(cmd_buffer);
+ if (unlikely(cmd_buffer->device->trace_bo))
+ radv_cmd_buffer_trace_emit(cmd_buffer);
}
void si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,