}
if (sctx->flags)
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
if (!si_switch_compute_shader(sctx, program, &program->shader,
                              code_object, info->pc))
   return;
sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
               si_get_flush_flags(sctx, SI_COHERENCY_CB_META, L2_LRU) |
               si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_LRU);
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
/* Save states. */
void *saved_cs = sctx->cs_shader_state.program;
/* Flush the caches for the first copy only.
 * Also wait for the previous CP DMA operations.
 */
if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC) && sctx->flags)
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
if (!(user_flags & SI_CPDMA_SKIP_SYNC_BEFORE) && *is_first &&
!(*packet_flags & CP_DMA_CLEAR))
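Note: the hunk above gates the pre-DMA sync on two things: the caller not opting out via SI_CPDMA_SKIP_GFX_SYNC, and there actually being pending flush flags. A minimal compilable sketch of that gating idiom, using made-up flag names/values and a simplified context in place of the real si_cp_dma.c definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the SI_CPDMA_* user flags; values are made up. */
enum {
   CPDMA_SKIP_GFX_SYNC    = 1 << 0,   /* caller has already synced */
   CPDMA_SKIP_SYNC_BEFORE = 1 << 1,   /* no raw wait before the first packet */
};

struct dma_ctx {
   unsigned flags;                              /* pending cache-flush bits */
   void (*emit_cache_flush)(struct dma_ctx *ctx);
};

static void flush(struct dma_ctx *ctx)
{
   printf("flush 0x%x\n", ctx->flags);
   ctx->flags = 0;
}

/* Mirrors the gating above: sync only when the caller did not opt out and
 * there is actually something pending. */
static void dma_prepare(struct dma_ctx *ctx, unsigned user_flags, bool *is_first)
{
   if (!(user_flags & CPDMA_SKIP_GFX_SYNC) && ctx->flags)
      ctx->emit_cache_flush(ctx);
   if (!(user_flags & CPDMA_SKIP_SYNC_BEFORE) && *is_first)
      printf("raw wait on first packet\n");
   *is_first = false;
}

int main(void)
{
   struct dma_ctx ctx = { .flags = 0x2, .emit_cache_flush = flush };
   bool is_first = true;
   dma_prepare(&ctx, 0, &is_first);
   return 0;
}

The real helper additionally skips the raw wait for CP_DMA_CLEAR packets, as the hunk shows; the sketch keeps only the two opt-out bits to show the shape of the gating.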
/* Wait for graphics/compute to be idle before updating the resident
 * descriptors directly in memory, in case the GPU is using them.
 */
sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
SI_CONTEXT_CS_PARTIAL_FLUSH;
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
util_dynarray_foreach(&sctx->resident_tex_handles,
struct si_texture_handle *, tex_handle) {
/* Invalidate L1 because it doesn't know that L2 changed. */
sctx->flags |= SI_CONTEXT_INV_SCACHE;
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
sctx->bindless_descriptors_dirty = false;
}
/* Wait for draw calls to finish if needed. */
if (wait_flags) {
ctx->flags |= wait_flags;
- si_emit_cache_flush(ctx);
+ ctx->emit_cache_flush(ctx);
}
ctx->gfx_last_ib_is_busy = wait_flags == 0;
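Note: this hunk is the flush emitted at the end of a gfx command buffer. The computed wait bits are folded into the pending flags and emitted through the new hook, and gfx_last_ib_is_busy records that an IB flushed without any wait bits may still be executing. A compact sketch of that bookkeeping, with simplified stand-in types rather than the real si_flush_gfx_cs:

#include <stdbool.h>
#include <stdio.h>

struct ib_ctx {
   unsigned flags;                              /* pending cache-flush bits */
   bool gfx_last_ib_is_busy;
   void (*emit_cache_flush)(struct ib_ctx *ctx);
};

static void flush_cb(struct ib_ctx *ctx)
{
   printf("flush 0x%x\n", ctx->flags);
   ctx->flags = 0;
}

/* If any wait bits were requested, emit them with the flush; an IB that was
 * fully waited on is known to be idle afterwards. */
static void end_ib(struct ib_ctx *ctx, unsigned wait_flags)
{
   if (wait_flags) {
      ctx->flags |= wait_flags;
      ctx->emit_cache_flush(ctx);
   }
   ctx->gfx_last_ib_is_busy = wait_flags == 0;
}

int main(void)
{
   struct ib_ctx ctx = { .flags = 0, .gfx_last_ib_is_busy = false,
                         .emit_cache_flush = flush_cb };
   end_ib(&ctx, 0x30);
   printf("busy=%d\n", ctx.gfx_last_ib_is_busy);
   return 0;
}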
goto fail;
/* Initialize context functions used by graphics and compute. */
+ sctx->emit_cache_flush = si_emit_cache_flush;
sctx->b.emit_string_marker = si_emit_string_marker;
sctx->b.set_debug_callback = si_set_debug_callback;
sctx->b.set_log_context = si_set_log_context;
struct pipe_device_reset_callback device_reset_callback;
struct u_log_context *log;
void *query_result_shader;
+
+ void (*emit_cache_flush)(struct si_context *ctx);
+
struct blitter_context *blitter;
void *custom_dsa_flush;
void *custom_blend_resolve;
(flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
sctx->compute_is_busy);
+ assert(sctx->chip_class <= GFX9);
+
if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
sctx->num_cb_cache_flushes++;
if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
   sctx->num_db_cache_flushes++;
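Note: the three hunks above are the heart of the change: struct si_context gains an emit_cache_flush hook, context creation points it at the existing implementation, and that implementation now asserts it is only reached on GFX6-GFX9. A self-contained sketch of the resulting creation-time dispatch; the types, the enum, and the gfx10_emit_cache_flush callback are illustrative stand-ins (this diff itself only installs the GFX6-9 function):

#include <assert.h>
#include <stdio.h>

enum chip_class { GFX6 = 6, GFX7, GFX8, GFX9, GFX10 };

struct ctx_sketch {
   enum chip_class chip_class;
   unsigned flags;                              /* pending cache-flush bits */
   void (*emit_cache_flush)(struct ctx_sketch *ctx);
};

/* Stands in for si_emit_cache_flush: covers GFX6-9 only, hence the assert. */
static void gfx6_9_emit_cache_flush(struct ctx_sketch *ctx)
{
   assert(ctx->chip_class <= GFX9);
   printf("GFX6-9 flush, flags=0x%x\n", ctx->flags);
   ctx->flags = 0;
}

/* Hypothetical newer-chip path; the pointer makes this split possible
 * without touching any call site. */
static void gfx10_emit_cache_flush(struct ctx_sketch *ctx)
{
   printf("GFX10 flush, flags=0x%x\n", ctx->flags);
   ctx->flags = 0;
}

/* Creation-time selection; call sites then use ctx->emit_cache_flush(ctx)
 * everywhere, exactly as the hunks rewrite them. */
static void create_context(struct ctx_sketch *ctx, enum chip_class cc)
{
   ctx->chip_class = cc;
   ctx->flags = 0;
   ctx->emit_cache_flush = cc >= GFX10 ? gfx10_emit_cache_flush
                                       : gfx6_9_emit_cache_flush;
}

int main(void)
{
   struct ctx_sketch ctx;
   create_context(&ctx, GFX9);
   ctx.flags = 0x3;
   if (ctx.flags)                  /* same guard as the call sites above */
      ctx.emit_cache_flush(&ctx);
   return 0;
}

Selecting the callback once at creation keeps the hot call sites to a single indirect call instead of a per-call chip_class branch.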
/* Emit all states except possibly render condition. */
si_emit_all_states(sctx, info, prim, instance_count,
primitive_restart, masked_atoms);
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
/* <-- CUs are idle here. */
if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond))
/* If we don't wait for idle, start prefetches first, then set
 * states, and draw at the end.
 */
if (sctx->flags)
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
/* Only prefetch the API VS and VBO descriptors. */
if (sctx->chip_class >= GFX7 && sctx->prefetch_L2_mask)
/* Flush L2, so that we don't just test L2 cache performance. */
if (!test_sdma) {
sctx->flags |= SI_CONTEXT_WB_L2;
- si_emit_cache_flush(sctx);
+ sctx->emit_cache_flush(sctx);
}
ctx->end_query(ctx, q[iter]);