	struct radv_cmd_state *state = &cmd_buffer->state;
	uint32_t mask = state->prefetch_L2_mask;

-	if (cmd_buffer->device->physical_device->rad_info.chip_class < CIK)
-		return;
-
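+	/* Note: the chip_class < CIK early return moved into the callers,
+	 * which now check has_prefetch before calling this function.
+	 */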
	if (vertex_stage_only) {
		/* Fast prefetch path for starting draws as soon as possible.
		 */
radv_draw(struct radv_cmd_buffer *cmd_buffer,
	  const struct radv_draw_info *info)
{
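+	/* Shader prefetches are skipped on SI (chip_class < CIK), so the
+	 * radv_emit_prefetch_L2() calls below are gated on has_prefetch.
+	 */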
+	bool has_prefetch =
+		cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
	bool pipeline_is_dirty =
		(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
		cmd_buffer->state.pipeline &&
		 * run in parallel, but starting the draw first is more
		 * important.
		 */
-		if (cmd_buffer->state.prefetch_L2_mask) {
+		if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
			radv_emit_prefetch_L2(cmd_buffer,
					      cmd_buffer->state.pipeline, false);
		}
		/* Prefetch the remaining shaders after the draw has been
		 * started.
		 */
-		if (cmd_buffer->state.prefetch_L2_mask) {
+		if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
			radv_emit_prefetch_L2(cmd_buffer,
					      cmd_buffer->state.pipeline, false);
		}
radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
	      const struct radv_dispatch_info *info)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
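+	/* As in radv_draw(), shader prefetches are skipped on SI. */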
+	bool has_prefetch =
+		cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
	bool pipeline_is_dirty = pipeline &&
				 pipeline != cmd_buffer->state.emitted_compute_pipeline;
		 * will run in parallel, but starting the dispatch first is
		 * more important.
		 */
-		if (pipeline_is_dirty) {
+		if (has_prefetch && pipeline_is_dirty) {
			radv_emit_shader_prefetch(cmd_buffer,
						  pipeline->shaders[MESA_SHADER_COMPUTE]);
		}
		 */
		si_emit_cache_flush(cmd_buffer);

-		if (pipeline_is_dirty) {
+		if (has_prefetch && pipeline_is_dirty) {
			radv_emit_shader_prefetch(cmd_buffer,
						  pipeline->shaders[MESA_SHADER_COMPUTE]);
		}