if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6)
cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
                                RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
+
+ /* Make sure to sync all pending active queries at the end of the
+ * command buffer.
+ */
+ cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
+
si_emit_cache_flush(cmd_buffer);
}
* Whether a query pool has been reset and we have to flush caches.
*/
bool pending_reset_query;
+
+ /**
+ * Bitmask of pending active query flushes.
+ */
+ enum radv_cmd_flush_bits active_query_flush_bits;
};
struct radv_image;
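
The new field drives a deferred-flush scheme: vkCmdEndQuery does not stall right away, it only records which flushes the ended query will eventually need, and they are paid when something actually consumes the query data (a reset in the same command buffer, or the final cache flush at the end of the command buffer, as in the first hunk above). A minimal model of that end-of-command-buffer fold, using illustrative stand-ins rather than the real RADV types:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the RADV_CMD_FLAG_* values. */
enum flush_bits {
	FLUSH_PS_PARTIAL = 1u << 0, /* wait for pixel shader work */
	FLUSH_CS_PARTIAL = 1u << 1, /* wait for compute shader work */
	FLUSH_INV_L2     = 1u << 2, /* invalidate the global L2 cache */
};

struct cmd_buffer_model {
	uint32_t flush_bits;              /* flushes queued for the next emit */
	uint32_t active_query_flush_bits; /* flushes owed to ended queries */
};

static void end_command_buffer(struct cmd_buffer_model *cmd)
{
	/* Fold the pending query flushes into the final cache flush so
	 * query results written by this command buffer are coherent
	 * once it finishes executing. */
	cmd->flush_bits |= cmd->active_query_flush_bits;
	printf("final cache flush: 0x%x\n", (unsigned)cmd->flush_bits);
	cmd->flush_bits = 0;
}

int main(void)
{
	struct cmd_buffer_model cmd = {
		.active_query_flush_bits = FLUSH_PS_PARTIAL | FLUSH_INV_L2,
	};
	end_command_buffer(&cmd); /* prints "final cache flush: 0x5" */
	return 0;
}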
? TIMESTAMP_NOT_READY : 0;
uint32_t flush_bits = 0;
+ /* Make sure to sync all previous work if the given command buffer has
+ * pending active queries. Otherwise, the GPU might write query data
+ * after the reset operation.
+ */
+ cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
+
flush_bits |= radv_fill_buffer(cmd_buffer, pool->bo,
firstQuery * pool->stride,
queryCount * pool->stride, value);
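
The hazard this hunk closes is easy to hit from the API; the sketch below (helper body elided, handles assumed valid) shows the recording pattern. Ending a query and then resetting the same pool inside one command buffer is valid Vulkan without an explicit barrier, so the ordering has to come from the driver itself:

#include <vulkan/vulkan.h>

/* Hypothetical repro pattern: a query ends and its pool is reset in
 * the same command buffer. */
static void record_query_then_reset(VkCommandBuffer cmd, VkQueryPool pool)
{
	vkCmdBeginQuery(cmd, pool, 0 /* query */, 0 /* flags */);
	/* ... draws or dispatches measured by the query ... */
	vkCmdEndQuery(cmd, pool, 0);

	/* Without the sync added above, the buffer fill that implements
	 * this reset could overtake the GPU write of the query result,
	 * leaving the slot with stale data. */
	vkCmdResetQueryPool(cmd, pool, 0 /* firstQuery */, 1 /* queryCount */);
}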
default:
unreachable("ending unhandled query type");
}
+
+ cmd_buffer->active_query_flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
+ RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
+ RADV_CMD_FLAG_INV_GLOBAL_L2 |
+ RADV_CMD_FLAG_INV_VMEM_L1;
}
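
Roughly, the two PARTIAL_FLUSH bits wait for outstanding pixel and compute shader work (the producers of query data), and the two INV bits invalidate the global L2 and vector L1 caches so later reads see that data. The mask is only accumulated here; it is consumed lazily, and the last hunk below prunes whatever an unrelated flush has already satisfied so the reset path does not sync twice. A small model of that pruning, again with stand-in values:

#include <stdint.h>
#include <stdio.h>

enum { FLUSH_PS = 1u << 0, FLUSH_CS = 1u << 1, FLUSH_L2 = 1u << 2 };

int main(void)
{
	uint32_t active_query_flush_bits = 0;

	/* vkCmdEndQuery: record the flushes this query will need. */
	active_query_flush_bits |= FLUSH_PS | FLUSH_CS | FLUSH_L2;

	/* Some unrelated barrier later emits a flush of PS work and L2;
	 * those requirements are now satisfied and can be dropped. */
	uint32_t emitted_flush_bits = FLUSH_PS | FLUSH_L2;
	active_query_flush_bits &= ~emitted_flush_bits;

	/* vkCmdResetQueryPool: only the CS flush is still pending. */
	printf("still pending: 0x%x\n", (unsigned)active_query_flush_bits);
	return 0;
}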
void radv_CmdBeginQueryIndexedEXT(
if (unlikely(cmd_buffer->device->trace_bo))
radv_cmd_buffer_trace_emit(cmd_buffer);
+ /* Clear the caches that have been flushed to avoid syncing too much
+ * when there are pending active queries.
+ */
+ cmd_buffer->active_query_flush_bits &= ~cmd_buffer->state.flush_bits;
+
cmd_buffer->state.flush_bits = 0;
/* If the driver used a compute shader for resetting a query pool, it