turnip: Fix vkCmdCopyQueryPoolResults with available flag
[mesa.git] / src / freedreno / vulkan / tu_query.c
index aaa61f63a9db52ed2ac4e715f9fff2d359029dcf..7931a449fb5709bfee308d264b7f3507c996a630 100644 (file)
 #include <string.h>
 #include <unistd.h>
 
+#include "registers/adreno_pm4.xml.h"
+#include "registers/adreno_common.xml.h"
+#include "registers/a6xx.xml.h"
+
 #include "nir/nir_builder.h"
+#include "util/os_time.h"
+
+#include "tu_cs.h"
+
+#define NSEC_PER_SEC 1000000000ull
+#define WAIT_TIMEOUT 5
 
 /* It seems like sample counts need to be copied over to 16-byte aligned
  * memory. */
@@ -47,6 +57,29 @@ struct PACKED occlusion_query_slot {
    struct slot_value result;
 };
 
+/* Returns the IOVA of a given uint64_t field in a given slot of a query
+ * pool. */
+#define query_iova(type, pool, query, field)                         \
+   ((pool)->bo.iova + (pool)->stride * (query) + offsetof(type, field) + \
+    offsetof(struct slot_value, value))
+
+#define occlusion_query_iova(pool, query, field)                     \
+   query_iova(struct occlusion_query_slot, pool, query, field)
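+
+/* For example, occlusion_query_iova(pool, 2, result) resolves to the GPU
+ * address of the result value in the third slot of an occlusion query pool. */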
+
+#define query_is_available(type, slot)                               \
+   ((type*)slot)->available.value
+
+#define occlusion_query_is_available(slot)                           \
+   query_is_available(struct occlusion_query_slot, slot)
+
+/*
+ * Returns a pointer to a given slot in a query pool.
+ */
+static void *
+slot_address(struct tu_query_pool *pool, uint32_t query)
+{
+   return (char*)pool->bo.map + query * pool->stride;
+}
+
 VkResult
 tu_CreateQueryPool(VkDevice _device,
                    const VkQueryPoolCreateInfo *pCreateInfo,
@@ -117,6 +150,106 @@ tu_DestroyQueryPool(VkDevice _device,
    vk_free2(&device->alloc, pAllocator, pool);
 }
 
+/* Wait on the availability status of a query up until a timeout. */
+static VkResult
+wait_for_available(struct tu_device *device, struct tu_query_pool *pool,
+                   uint32_t query)
+{
+   /* TODO: Once the MSM_IOVA_WAIT ioctl has landed upstream, use it to wait
+    * on the available bit in a scheduler-friendly way instead of busy
+    * polling. */
+   struct occlusion_query_slot *slot = slot_address(pool, query);
+   uint64_t abs_timeout = os_time_get_absolute_timeout(
+         WAIT_TIMEOUT * NSEC_PER_SEC);
+   while (os_time_get_nano() < abs_timeout) {
+      if (occlusion_query_is_available(slot))
+         return VK_SUCCESS;
+   }
+   return vk_error(device->instance, VK_TIMEOUT);
+}
+
+/* Writes a query value to a buffer from the CPU. */
+static void
+write_query_value_cpu(char* base,
+                      uint32_t offset,
+                      uint64_t value,
+                      VkQueryResultFlags flags)
+{
+   if (flags & VK_QUERY_RESULT_64_BIT) {
+      *(uint64_t*)(base + (offset * sizeof(uint64_t))) = value;
+   } else {
+      *(uint32_t*)(base + (offset * sizeof(uint32_t))) = value;
+   }
+}
+
+static VkResult
+get_occlusion_query_pool_results(struct tu_device *device,
+                                 struct tu_query_pool *pool,
+                                 uint32_t firstQuery,
+                                 uint32_t queryCount,
+                                 size_t dataSize,
+                                 void *pData,
+                                 VkDeviceSize stride,
+                                 VkQueryResultFlags flags)
+{
+   assert(dataSize >= stride * queryCount);
+
+   char *result_base = pData;
+   VkResult result = VK_SUCCESS;
+   for (uint32_t i = 0; i < queryCount; i++) {
+      uint32_t query = firstQuery + i;
+      struct occlusion_query_slot *slot = slot_address(pool, query);
+      bool available = occlusion_query_is_available(slot);
+      if ((flags & VK_QUERY_RESULT_WAIT_BIT) && !available) {
+         VkResult wait_result = wait_for_available(device, pool, query);
+         if (wait_result != VK_SUCCESS)
+            return wait_result;
+         available = true;
+      } else if (!(flags & VK_QUERY_RESULT_PARTIAL_BIT) && !available) {
+         /* From the Vulkan 1.1.130 spec:
+          *
+          *    If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are
+          *    both not set then no result values are written to pData for
+          *    queries that are in the unavailable state at the time of the
+          *    call, and vkGetQueryPoolResults returns VK_NOT_READY. However,
+          *    availability state is still written to pData for those queries
+          *    if VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set.
+          */
+         result = VK_NOT_READY;
+         if (!(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
+            result_base += stride;
+            continue;
+         }
+      }
+
+      if (available)
+         write_query_value_cpu(result_base, 0, slot->result.value, flags);
+      else if (flags & VK_QUERY_RESULT_PARTIAL_BIT)
+          /* From the Vulkan 1.1.130 spec:
+           *
+           *   If VK_QUERY_RESULT_PARTIAL_BIT is set, VK_QUERY_RESULT_WAIT_BIT
+           *   is not set, and the query’s status is unavailable, an
+           *   intermediate result value between zero and the final result
+           *   value is written to pData for that query.
+           *
+           * Just return 0 here for simplicity since it's a valid result.
+           */
+         write_query_value_cpu(result_base, 0, 0, flags);
+
+      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
+         /* From the Vulkan 1.1.130 spec:
+          *
+          *    If VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set, the final
+          *    integer value written for each query is non-zero if the query’s
+          *    status was available or zero if the status was unavailable.
+          */
+         write_query_value_cpu(result_base, 1, available, flags);
+
+      result_base += stride;
+   }
+   return result;
+}
+
 VkResult
 tu_GetQueryPoolResults(VkDevice _device,
                        VkQueryPool queryPool,
@@ -127,9 +260,124 @@ tu_GetQueryPoolResults(VkDevice _device,
                        VkDeviceSize stride,
                        VkQueryResultFlags flags)
 {
+   TU_FROM_HANDLE(tu_device, device, _device);
+   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
+   assert(firstQuery + queryCount <= pool->size);
+
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION: {
+      return get_occlusion_query_pool_results(device, pool, firstQuery,
+            queryCount, dataSize, pData, stride, flags);
+   }
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   case VK_QUERY_TYPE_TIMESTAMP:
+      unreachable("Unimplemented query type");
+   default:
+      assert(!"Invalid query type");
+   }
    return VK_SUCCESS;
 }
 
+/* Copies a query value from one buffer to another from the GPU. */
+static void
+copy_query_value_gpu(struct tu_cmd_buffer *cmdbuf,
+                     struct tu_cs *cs,
+                     uint64_t src_iova,
+                     uint64_t base_write_iova,
+                     uint32_t offset,
+                     VkQueryResultFlags flags)
+{
+   uint32_t element_size = flags & VK_QUERY_RESULT_64_BIT ?
+         sizeof(uint64_t) : sizeof(uint32_t);
+   uint64_t write_iova = base_write_iova + (offset * element_size);
+
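+   /* CP_MEM_TO_MEM payload: one flags dword, then the destination IOVA and
+    * the source IOVA (two dwords each). The CP_MEM_TO_MEM_0_DOUBLE flag
+    * selects a 64-bit copy; otherwise only 32 bits are copied. */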
+   tu_cs_reserve_space(cmdbuf->device, cs, 6);
+   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 5);
+   uint32_t mem_to_mem_flags = flags & VK_QUERY_RESULT_64_BIT ?
+         CP_MEM_TO_MEM_0_DOUBLE : 0;
+   tu_cs_emit(cs, mem_to_mem_flags);
+   tu_cs_emit_qw(cs, write_iova);
+   tu_cs_emit_qw(cs, src_iova);
+}
+
+static void
+emit_copy_occlusion_query_pool_results(struct tu_cmd_buffer *cmdbuf,
+                                       struct tu_cs *cs,
+                                       struct tu_query_pool *pool,
+                                       uint32_t firstQuery,
+                                       uint32_t queryCount,
+                                       struct tu_buffer *buffer,
+                                       VkDeviceSize dstOffset,
+                                       VkDeviceSize stride,
+                                       VkQueryResultFlags flags)
+{
+   /* From the Vulkan 1.1.130 spec:
+    *
+    *    vkCmdCopyQueryPoolResults is guaranteed to see the effect of previous
+    *    uses of vkCmdResetQueryPool in the same queue, without any additional
+    *    synchronization.
+    *
+    * To ensure that previous writes to the available bit are coherent, first
+    * wait for all writes to complete.
+    */
+   tu_cs_reserve_space(cmdbuf->device, cs, 1);
+   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
+
+   for (uint32_t i = 0; i < queryCount; i++) {
+      uint32_t query = firstQuery + i;
+      uint64_t available_iova = occlusion_query_iova(pool, query, available);
+      uint64_t result_iova = occlusion_query_iova(pool, query, result);
+      uint64_t buffer_iova = tu_buffer_iova(buffer) + dstOffset + i * stride;
+      /* Wait for the available bit to be set if executed with the
+       * VK_QUERY_RESULT_WAIT_BIT flag. */
+      if (flags & VK_QUERY_RESULT_WAIT_BIT) {
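+         /* CP_WAIT_REG_MEM with FUNCTION=WRITE_EQ and POLL_MEMORY stalls
+          * the CP until the masked 32-bit value at available_iova equals
+          * REF (1), i.e. until the query has been marked available. */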
+         tu_cs_reserve_space(cmdbuf->device, cs, 7);
+         tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
+         tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
+                        CP_WAIT_REG_MEM_0_POLL_MEMORY);
+         tu_cs_emit_qw(cs, available_iova);
+         tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(0x1));
+         tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
+         tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
+      }
+
+      if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
+         /* Unconditionally copying slot->result into the buffer here is
+          * valid because we only write slot->result in vkCmdEndQuery. Thus,
+          * even if the query is unavailable, this will copy the correct
+          * partial value of 0.
+          */
+         copy_query_value_gpu(cmdbuf, cs, result_iova, buffer_iova,
+                              0 /* offset */, flags);
+      } else {
+         /* Conditionally copy slot->result into the buffer based on whether
+          * the query is available.
+          *
+          * NOTE: For the conditional packets to be executed, CP_COND_EXEC
+          * tests that ADDR0 != 0 and ADDR1 < REF. The packet here simply tests
+          * that 0 < available < 2, aka available == 1.
+          */
+         tu_cs_reserve_space(cmdbuf->device, cs, 7);
+         tu_cs_emit_pkt7(cs, CP_COND_EXEC, 6);
+         tu_cs_emit_qw(cs, available_iova);
+         tu_cs_emit_qw(cs, available_iova);
+         tu_cs_emit(cs, CP_COND_EXEC_4_REF(0x2));
+         tu_cs_emit(cs, 6); /* Cond execute the next 6 DWORDS */
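+         /* That predicated region is exactly the CP_MEM_TO_MEM packet
+          * emitted by copy_query_value_gpu() below: one pkt7 header dword
+          * plus five payload dwords. */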
+
+         /* Start of conditional execution */
+         copy_query_value_gpu(cmdbuf, cs, result_iova, buffer_iova,
+                              0 /* offset */, flags);
+         /* End of conditional execution */
+      }
+
+      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
+         copy_query_value_gpu(cmdbuf, cs, available_iova, buffer_iova,
+                              1 /* offset */, flags);
+      }
+   }
+
+   tu_bo_list_add(&cmdbuf->bo_list, buffer->bo, MSM_SUBMIT_BO_WRITE);
+}
+
 void
 tu_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
                            VkQueryPool queryPool,
@@ -140,6 +388,46 @@ tu_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
                            VkDeviceSize stride,
                            VkQueryResultFlags flags)
 {
+   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
+   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
+   TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
+   struct tu_cs *cs = &cmdbuf->cs;
+   assert(firstQuery + queryCount <= pool->size);
+
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION:
+      emit_copy_occlusion_query_pool_results(cmdbuf, cs, pool, firstQuery,
+            queryCount, buffer, dstOffset, stride, flags);
+      break;
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   case VK_QUERY_TYPE_TIMESTAMP:
+      unreachable("Unimplemented query type");
+   default:
+      assert(!"Invalid query type");
+   }
+}
+
+static void
+emit_reset_occlusion_query_pool(struct tu_cmd_buffer *cmdbuf,
+                                struct tu_query_pool *pool,
+                                uint32_t firstQuery,
+                                uint32_t queryCount)
+{
+   struct tu_cs *cs = &cmdbuf->cs;
+
+   for (uint32_t i = 0; i < queryCount; i++) {
+      uint32_t query = firstQuery + i;
+      uint64_t available_iova = occlusion_query_iova(pool, query, available);
+      uint64_t result_iova = occlusion_query_iova(pool, query, result);
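+      /* Each CP_MEM_WRITE below carries two address dwords followed by two
+       * data dwords, clearing the 64-bit available and result values. */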
+      tu_cs_reserve_space(cmdbuf->device, cs, 11);
+      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
+      tu_cs_emit_qw(cs, available_iova);
+      tu_cs_emit_qw(cs, 0x0);
+
+      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
+      tu_cs_emit_qw(cs, result_iova);
+      tu_cs_emit_qw(cs, 0x0);
+   }
 }
 
 void
@@ -148,6 +436,54 @@ tu_CmdResetQueryPool(VkCommandBuffer commandBuffer,
                      uint32_t firstQuery,
                      uint32_t queryCount)
 {
+   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
+   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
+
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION:
+      emit_reset_occlusion_query_pool(cmdbuf, pool, firstQuery, queryCount);
+      break;
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   case VK_QUERY_TYPE_TIMESTAMP:
+      unreachable("Unimplemented query type");
+   default:
+      assert(!"Invalid query type");
+   }
+
+   tu_bo_list_add(&cmdbuf->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);
+}
+
+static void
+emit_begin_occlusion_query(struct tu_cmd_buffer *cmdbuf,
+                           struct tu_query_pool *pool,
+                           uint32_t query)
+{
+   /* From the Vulkan 1.1.130 spec:
+    *
+    *    A query must begin and end inside the same subpass of a render pass
+    *    instance, or must both begin and end outside of a render pass
+    *    instance.
+    *
+    * Unlike an immediate-mode renderer, Turnip renders all tiles on
+    * vkCmdEndRenderPass, not individually on each vkCmdDraw*. As such, if a
+    * query begins/ends inside the same subpass of a render pass, we need to
+    * record the packets on the secondary draw command stream. cmdbuf->draw_cs
+    * is then run on every tile during render, so we just need to accumulate
+    * sample counts in slot->result to compute the query result.
+    */
+   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
+
+   uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
+
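+   /* With RB_SAMPLE_COUNT_CONTROL.copy set, the ZPASS_DONE event below makes
+    * the hardware write the current sample counter to the address programmed
+    * into RB_SAMPLE_COUNT_ADDR, i.e. slot->begin. */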
+   tu_cs_reserve_space(cmdbuf->device, cs, 7);
+   tu_cs_emit_regs(cs,
+                   A6XX_RB_SAMPLE_COUNT_CONTROL(.copy = true));
+
+   tu_cs_emit_regs(cs,
+                   A6XX_RB_SAMPLE_COUNT_ADDR_LO(begin_iova));
+
+   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
+   tu_cs_emit(cs, ZPASS_DONE);
 }
 
 void
@@ -156,6 +492,101 @@ tu_CmdBeginQuery(VkCommandBuffer commandBuffer,
                  uint32_t query,
                  VkQueryControlFlags flags)
 {
+   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
+   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
+   assert(query < pool->size);
+
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION:
+      /* In freedreno, there is no implementation difference between
+       * GL_SAMPLES_PASSED and GL_ANY_SAMPLES_PASSED, so we can similarly
+       * ignore the VK_QUERY_CONTROL_PRECISE_BIT flag here.
+       */
+      emit_begin_occlusion_query(cmdbuf, pool, query);
+      break;
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   case VK_QUERY_TYPE_TIMESTAMP:
+      unreachable("Unimplemented query type");
+   default:
+      assert(!"Invalid query type");
+   }
+
+   tu_bo_list_add(&cmdbuf->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);
+}
+
+static void
+emit_end_occlusion_query(struct tu_cmd_buffer *cmdbuf,
+                         struct tu_query_pool *pool,
+                         uint32_t query)
+{
+   /* Ending an occlusion query happens in a few steps:
+    *    1) Set the slot->end to UINT64_MAX.
+    *    2) Set up the SAMPLE_COUNT registers and trigger a CP_EVENT_WRITE to
+    *       write the current sample count value into slot->end.
+    *    3) Since (2) is asynchronous, wait until slot->end is not equal to
+    *       UINT64_MAX before continuing via CP_WAIT_REG_MEM.
+    *    4) Accumulate the results of the query (slot->end - slot->begin) into
+    *       slot->result.
+    *    5) If vkCmdEndQuery is *not* called from within the scope of a render
+    *       pass, set the slot's available bit since the query is now done.
+    *    6) If vkCmdEndQuery *is* called from within the scope of a render
+    *       pass, we cannot mark as available yet since the commands in
+    *       draw_cs are not run until vkCmdEndRenderPass.
+    */
+   const struct tu_render_pass *pass = cmdbuf->state.pass;
+   struct tu_cs *cs = pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
+
+   uint64_t available_iova = occlusion_query_iova(pool, query, available);
+   uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
+   uint64_t end_iova = occlusion_query_iova(pool, query, end);
+   uint64_t result_iova = occlusion_query_iova(pool, query, result);
+   tu_cs_reserve_space(cmdbuf->device, cs, 31);
+   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
+   tu_cs_emit_qw(cs, end_iova);
+   tu_cs_emit_qw(cs, 0xffffffffffffffffull);
+
+   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
+
+   tu_cs_emit_regs(cs,
+                   A6XX_RB_SAMPLE_COUNT_CONTROL(.copy = true));
+
+   tu_cs_emit_regs(cs,
+                   A6XX_RB_SAMPLE_COUNT_ADDR_LO(end_iova));
+
+   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
+   tu_cs_emit(cs, ZPASS_DONE);
+
+   tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
+   tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_NE) |
+                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
+   tu_cs_emit_qw(cs, end_iova);
+   tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(0xffffffff));
+   tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
+   tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
+
+   /* result (dst) = result (srcA) + end (srcB) - begin (srcC) */
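+   /* The CP_MEM_TO_MEM payload follows that order: flags, then dst, srcA,
+    * srcB and srcC as two-dword addresses; NEG_C negates (subtracts) srcC. */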
+   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
+   tu_cs_emit(cs, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C);
+   tu_cs_emit_qw(cs, result_iova);
+   tu_cs_emit_qw(cs, result_iova);
+   tu_cs_emit_qw(cs, end_iova);
+   tu_cs_emit_qw(cs, begin_iova);
+
+   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
+
+   if (pass)
+      /* Technically, queries should be tracked per-subpass, but here we track
+       * at the render pass level to simplify the code a bit. This is safe
+       * because the only commands that use the available bit are
+       * vkCmdCopyQueryPoolResults and vkCmdResetQueryPool, both of which
+       * cannot be invoked from inside a render pass scope.
+       */
+      cs = &cmdbuf->draw_epilogue_cs;
+
+   tu_cs_reserve_space(cmdbuf->device, cs, 5);
+   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
+   tu_cs_emit_qw(cs, available_iova);
+   tu_cs_emit_qw(cs, 0x1);
 }
 
 void
@@ -163,6 +594,22 @@ tu_CmdEndQuery(VkCommandBuffer commandBuffer,
                VkQueryPool queryPool,
                uint32_t query)
 {
+   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
+   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
+   assert(query < pool->size);
+
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION:
+      emit_end_occlusion_query(cmdbuf, pool, query);
+      break;
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   case VK_QUERY_TYPE_TIMESTAMP:
+      unreachable("Unimplemented query type");
+   default:
+      assert(!"Invalid query type");
+   }
+
+   tu_bo_list_add(&cmdbuf->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);
 }
 
 void