/* Prefetch all pipeline shaders at first draw time. */
cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
+ if ((cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI10 ||
+ cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI12 ||
+ cmd_buffer->device->physical_device->rad_info.family == CHIP_NAVI14) &&
+ cmd_buffer->state.emitted_pipeline &&
+ radv_pipeline_has_ngg(cmd_buffer->state.emitted_pipeline) &&
+ !radv_pipeline_has_ngg(cmd_buffer->state.pipeline)) {
+ /* Transitioning from NGG to legacy GS requires
+ * VGT_FLUSH on Navi10-14. VGT_FLUSH is also emitted
+ * at the beginning of IBs when legacy GS ring pointers
+ * are set.
+ */
+ cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VGT_FLUSH;
+ }
+
radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
radv_bind_streamout_state(cmd_buffer, pipeline);
unsigned cb_db_event = 0;
/* We don't need these. */
- assert(!(flush_bits & (RADV_CMD_FLAG_VGT_FLUSH |
- RADV_CMD_FLAG_VGT_STREAMOUT_SYNC)));
+ assert(!(flush_bits & (RADV_CMD_FLAG_VGT_STREAMOUT_SYNC)));
if (flush_bits & RADV_CMD_FLAG_INV_ICACHE)
gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
*flush_cnt, 0xffffffff);
}
+ /* Sync the VGT state machine: emit a VGT_FLUSH event (required e.g. when
+  * transitioning from NGG to legacy GS on Navi10-14). */
+ if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
+ }
+
/* Ignore fields that only modify the behavior of other fields. */
if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
/* Flush caches and wait for the caches to assert idle.