return result;
}
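+/* Sketch of a doc comment (my inference from the code below, not the
+ * author's text): flush caches so the effect of a previous
+ * vkCmdResetQueryPool() is visible before the pool is read back. Pools of
+ * at least RADV_BUFFER_OPS_CS_THRESHOLD bytes are reset with a compute
+ * shader and need the flush; smaller pools use a CP DMA clear and do not.
+ */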
+static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
+ struct radv_query_pool *pool)
+{
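+ /* A query pool reset was recorded earlier in this command buffer
+ * (flag semantics inferred from the name and the spec quote below).
+ */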
+ if (cmd_buffer->pending_reset_query) {
+ if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
+ /* Only need to flush caches if the query pool size is
+ * large enough to be reset using the compute shader
+ * path. Small pools don't need any cache flushes
+ * because we use a CP DMA clear.
+ */
+ si_emit_cache_flush(cmd_buffer);
+ }
+ }
+}
+
void radv_CmdCopyQueryPoolResults(
VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, pool->bo);
radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, dst_buffer->bo);
+ /* From the Vulkan spec 1.1.108:
+ *
+ * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
+ * previous uses of vkCmdResetQueryPool in the same queue, without any
+ * additional synchronization."
+ *
+ * So we have to flush the caches if the compute shader path was used
+ * for the reset.
+ */
+ emit_query_flush(cmd_buffer, pool);
+
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
}
}
-static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
- struct radv_query_pool *pool)
-{
- if (cmd_buffer->pending_reset_query) {
- if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
- /* Only need to flush caches if the query pool size is
- * large enough to be resetted using the compute shader
- * path. Small pools don't need any cache flushes
- * because we use a CP dma clear.
- */
- si_emit_cache_flush(cmd_buffer);
- }
- }
-}
-
static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
uint64_t va,
VkQueryType query_type,