turnip: Fix vkCmdCopyQueryPoolResults with available flag
diff --git a/src/freedreno/vulkan/tu_query.c b/src/freedreno/vulkan/tu_query.c
index d1abf1ed0cbbbe1ae89f3f4784d3161749524354..7931a449fb5709bfee308d264b7f3507c996a630 100644
--- a/src/freedreno/vulkan/tu_query.c
+++ b/src/freedreno/vulkan/tu_query.c
 #include "registers/a6xx.xml.h"
 
 #include "nir/nir_builder.h"
+#include "util/os_time.h"
 
 #include "tu_cs.h"
 
+#define NSEC_PER_SEC 1000000000ull
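+/* How long vkGetQueryPoolResults with VK_QUERY_RESULT_WAIT_BIT will poll for
+ * a query to become available before returning VK_TIMEOUT, in seconds. */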
+#define WAIT_TIMEOUT 5
+
 /* It seems like sample counts need to be copied over to 16-byte aligned
  * memory. */
 struct PACKED slot_value {
@@ -62,6 +66,20 @@ struct PACKED occlusion_query_slot {
 #define occlusion_query_iova(pool, query, field)                     \
    query_iova(struct occlusion_query_slot, pool, query, field)
 
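+/* Reads the availability flag of a CPU-mapped query slot. */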
+#define query_is_available(type, slot)                               \
+   ((type *)(slot))->available.value
+
+#define occlusion_query_is_available(slot)                           \
+   query_is_available(struct occlusion_query_slot, slot)
+
+/*
+ * Returns a CPU pointer to a given slot in a query pool, through the mapped
+ * BO.
+ */
+static void *
+slot_address(struct tu_query_pool *pool, uint32_t query)
+{
+   return (char *)pool->bo.map + query * pool->stride;
+}
+
 VkResult
 tu_CreateQueryPool(VkDevice _device,
                    const VkQueryPoolCreateInfo *pCreateInfo,
@@ -132,6 +150,106 @@ tu_DestroyQueryPool(VkDevice _device,
    vk_free2(&device->alloc, pAllocator, pool);
 }
 
+/* Wait on the availability status of a query until a timeout expires. */
+static VkResult
+wait_for_available(struct tu_device *device, struct tu_query_pool *pool,
+                   uint32_t query)
+{
+   /* TODO: Use the MSM_IOVA_WAIT ioctl to wait on the available bit in a
+    * scheduler-friendly way instead of busy polling once the patch has
+    * landed upstream. */
+   struct occlusion_query_slot *slot = slot_address(pool, query);
+   uint64_t abs_timeout = os_time_get_absolute_timeout(
+         WAIT_TIMEOUT * NSEC_PER_SEC);
+   while (os_time_get_nano() < abs_timeout) {
+      if (occlusion_query_is_available(slot))
+         return VK_SUCCESS;
+   }
+   return vk_error(device->instance, VK_TIMEOUT);
+}
+
+/* Writes a query value into the results buffer on the CPU. The offset is in
+ * elements of the size selected by VK_QUERY_RESULT_64_BIT. */
+static void
+write_query_value_cpu(char *base,
+                      uint32_t offset,
+                      uint64_t value,
+                      VkQueryResultFlags flags)
+{
+   if (flags & VK_QUERY_RESULT_64_BIT) {
+      *(uint64_t*)(base + (offset * sizeof(uint64_t))) = value;
+   } else {
+      *(uint32_t*)(base + (offset * sizeof(uint32_t))) = value;
+   }
+}
+
+static VkResult
+get_occlusion_query_pool_results(struct tu_device *device,
+                                 struct tu_query_pool *pool,
+                                 uint32_t firstQuery,
+                                 uint32_t queryCount,
+                                 size_t dataSize,
+                                 void *pData,
+                                 VkDeviceSize stride,
+                                 VkQueryResultFlags flags)
+{
+   assert(dataSize >= stride * queryCount);
+
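+   /* Walk pData with a byte pointer so the caller-provided stride can be
+    * applied directly. */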
+   char *result_base = pData;
+   VkResult result = VK_SUCCESS;
+   for (uint32_t i = 0; i < queryCount; i++) {
+      uint32_t query = firstQuery + i;
+      struct occlusion_query_slot *slot = slot_address(pool, query);
+      bool available = occlusion_query_is_available(slot);
+      if ((flags & VK_QUERY_RESULT_WAIT_BIT) && !available) {
+         VkResult wait_result = wait_for_available(device, pool, query);
+         if (wait_result != VK_SUCCESS)
+            return wait_result;
+         available = true;
+      } else if (!(flags & VK_QUERY_RESULT_PARTIAL_BIT) && !available) {
+         /* From the Vulkan 1.1.130 spec:
+          *
+          *    If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are
+          *    both not set then no result values are written to pData for
+          *    queries that are in the unavailable state at the time of the
+          *    call, and vkGetQueryPoolResults returns VK_NOT_READY. However,
+          *    availability state is still written to pData for those queries
+          *    if VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set.
+          */
+         result = VK_NOT_READY;
+         if (!(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
+            result_base += stride;
+            continue;
+         }
+      }
+
+      if (available)
+         write_query_value_cpu(result_base, 0, slot->result.value, flags);
+      else if (flags & VK_QUERY_RESULT_PARTIAL_BIT)
+         /* From the Vulkan 1.1.130 spec:
+          *
+          *    If VK_QUERY_RESULT_PARTIAL_BIT is set, VK_QUERY_RESULT_WAIT_BIT
+          *    is not set, and the query’s status is unavailable, an
+          *    intermediate result value between zero and the final result
+          *    value is written to pData for that query.
+          *
+          * Just return 0 here for simplicity since it's a valid result.
+          */
+         write_query_value_cpu(result_base, 0, 0, flags);
+
+      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
+         /* From the Vulkan 1.1.130 spec:
+          *
+          *    If VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set, the final
+          *    integer value written for each query is non-zero if the query’s
+          *    status was available or zero if the status was unavailable.
+          */
+         write_query_value_cpu(result_base, 1, available, flags);
+
+      result_base += stride;
+   }
+   return result;
+}
+
 VkResult
 tu_GetQueryPoolResults(VkDevice _device,
                        VkQueryPool queryPool,
@@ -142,9 +260,124 @@ tu_GetQueryPoolResults(VkDevice _device,
                        VkDeviceSize stride,
                        VkQueryResultFlags flags)
 {
+   TU_FROM_HANDLE(tu_device, device, _device);
+   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
+   assert(firstQuery + queryCount <= pool->size);
+
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION: {
+      return get_occlusion_query_pool_results(device, pool, firstQuery,
+            queryCount, dataSize, pData, stride, flags);
+   }
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   case VK_QUERY_TYPE_TIMESTAMP:
+      unreachable("Unimplemented query type");
+   default:
+      assert(!"Invalid query type");
+   }
    return VK_SUCCESS;
 }
 
+/* Copies a query value from one buffer to another on the GPU. The offset is
+ * in elements of the size selected by VK_QUERY_RESULT_64_BIT. */
+static void
+copy_query_value_gpu(struct tu_cmd_buffer *cmdbuf,
+                     struct tu_cs *cs,
+                     uint64_t src_iova,
+                     uint64_t base_write_iova,
+                     uint32_t offset,
+                     VkQueryResultFlags flags)
+{
+   uint32_t element_size = flags & VK_QUERY_RESULT_64_BIT ?
+         sizeof(uint64_t) : sizeof(uint32_t);
+   uint64_t write_iova = base_write_iova + (offset * element_size);
+
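+   /* CP_MEM_TO_MEM makes the CP copy from the source iova to the
+    * destination iova; the DOUBLE flag selects a 64-bit copy instead of the
+    * default 32-bit one. */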
+   tu_cs_reserve_space(cmdbuf->device, cs, 6);
+   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 5);
+   uint32_t mem_to_mem_flags = flags & VK_QUERY_RESULT_64_BIT ?
+         CP_MEM_TO_MEM_0_DOUBLE : 0;
+   tu_cs_emit(cs, mem_to_mem_flags);
+   tu_cs_emit_qw(cs, write_iova);
+   tu_cs_emit_qw(cs, src_iova);
+}
+
+static void
+emit_copy_occlusion_query_pool_results(struct tu_cmd_buffer *cmdbuf,
+                                       struct tu_cs *cs,
+                                       struct tu_query_pool *pool,
+                                       uint32_t firstQuery,
+                                       uint32_t queryCount,
+                                       struct tu_buffer *buffer,
+                                       VkDeviceSize dstOffset,
+                                       VkDeviceSize stride,
+                                       VkQueryResultFlags flags)
+{
+   /* From the Vulkan 1.1.130 spec:
+    *
+    *    vkCmdCopyQueryPoolResults is guaranteed to see the effect of previous
+    *    uses of vkCmdResetQueryPool in the same queue, without any additional
+    *    synchronization.
+    *
+    * To ensure that previous writes to the available bit are coherent, first
+    * wait for all writes to complete.
+    */
+   tu_cs_reserve_space(cmdbuf->device, cs, 1);
+   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
+
+   for (uint32_t i = 0; i < queryCount; i++) {
+      uint32_t query = firstQuery + i;
+      uint64_t available_iova = occlusion_query_iova(pool, query, available);
+      uint64_t result_iova = occlusion_query_iova(pool, query, result);
+      uint64_t buffer_iova = tu_buffer_iova(buffer) + dstOffset + i * stride;
+      /* Wait for the available bit to be set if executed with the
+       * VK_QUERY_RESULT_WAIT_BIT flag. */
+      if (flags & VK_QUERY_RESULT_WAIT_BIT) {
+         tu_cs_reserve_space(cmdbuf->device, cs, 7);
+         tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
+         tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
+                        CP_WAIT_REG_MEM_0_POLL_MEMORY);
+         tu_cs_emit_qw(cs, available_iova);
+         tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(0x1));
+         tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
+         tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
+      }
+
+      if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
+         /* Unconditionally copying bo->result into the buffer here is valid
+          * because bo->result is only written at vkCmdEndQuery. Thus, even
+          * if the query is unavailable, this copies the valid partial value
+          * of 0.
+          */
+         copy_query_value_gpu(cmdbuf, cs, result_iova, buffer_iova,
+                              0 /* offset */, flags);
+      } else {
+         /* Conditionally copy bo->result into the buffer based on whether the
+          * query is available.
+          *
+          * NOTE: For the conditional packets to be executed, CP_COND_EXEC
+          * tests that ADDR0 != 0 and ADDR1 < REF. The packet here simply tests
+          * that 0 < available < 2, aka available == 1.
+          */
+         tu_cs_reserve_space(cmdbuf->device, cs, 7);
+         tu_cs_emit_pkt7(cs, CP_COND_EXEC, 6);
+         tu_cs_emit_qw(cs, available_iova);
+         tu_cs_emit_qw(cs, available_iova);
+         tu_cs_emit(cs, CP_COND_EXEC_4_REF(0x2));
+         tu_cs_emit(cs, 6); /* Cond execute the next 6 dwords, i.e. the
+                             * CP_MEM_TO_MEM packet emitted below */
+
+         /* Start of conditional execution */
+         copy_query_value_gpu(cmdbuf, cs, result_iova, buffer_iova,
+                              0 /* offset */, flags);
+         /* End of conditional execution */
+      }
+
+      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
+         copy_query_value_gpu(cmdbuf, cs, available_iova, buffer_iova,
+                              1 /* offset */, flags);
+      }
+   }
+
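+   /* The CP writes query results into the destination buffer, so add its BO
+    * to the submit list with write access. */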
+   tu_bo_list_add(&cmdbuf->bo_list, buffer->bo, MSM_SUBMIT_BO_WRITE);
+}
+
 void
 tu_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
                            VkQueryPool queryPool,
@@ -155,6 +388,46 @@ tu_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
                            VkDeviceSize stride,
                            VkQueryResultFlags flags)
 {
+   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
+   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
+   TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
+   struct tu_cs *cs = &cmdbuf->cs;
+   assert(firstQuery + queryCount <= pool->size);
+
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION:
+      emit_copy_occlusion_query_pool_results(cmdbuf, cs, pool, firstQuery,
+            queryCount, buffer, dstOffset, stride, flags);
+      break;
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   case VK_QUERY_TYPE_TIMESTAMP:
+      unreachable("Unimplemented query type");
+   default:
+      assert(!"Invalid query type");
+   }
+}
+
+static void
+emit_reset_occlusion_query_pool(struct tu_cmd_buffer *cmdbuf,
+                                struct tu_query_pool *pool,
+                                uint32_t firstQuery,
+                                uint32_t queryCount)
+{
+   struct tu_cs *cs = &cmdbuf->cs;
+
+   for (uint32_t i = 0; i < queryCount; i++) {
+      uint32_t query = firstQuery + i;
+      uint64_t available_iova = occlusion_query_iova(pool, query, available);
+      uint64_t result_iova = occlusion_query_iova(pool, query, result);
+      tu_cs_reserve_space(cmdbuf->device, cs, 11);
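+      /* Clear the availability flag so the query reads back as unavailable
+       * until it is ended again. */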
+      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
+      tu_cs_emit_qw(cs, available_iova);
+      tu_cs_emit_qw(cs, 0x0);
+
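+      /* Also zero the accumulated result; this is the partial value an
+       * unavailable query will report. */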
+      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
+      tu_cs_emit_qw(cs, result_iova);
+      tu_cs_emit_qw(cs, 0x0);
+   }
 }
 
 void
@@ -163,6 +436,21 @@ tu_CmdResetQueryPool(VkCommandBuffer commandBuffer,
                      uint32_t firstQuery,
                      uint32_t queryCount)
 {
+   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
+   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
+
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION:
+      emit_reset_occlusion_query_pool(cmdbuf, pool, firstQuery, queryCount);
+      break;
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   case VK_QUERY_TYPE_TIMESTAMP:
+      unreachable("Unimplemented query type");
+   default:
+      assert(!"Invalid query type");
+   }
+
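+   /* The reset writes to the pool BO, so add it to the submit list with
+    * write access. */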
+   tu_bo_list_add(&cmdbuf->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);
 }
 
 static void