#define NSEC_PER_SEC 1000000000ull
#define WAIT_TIMEOUT 5
-/* Depending on the query type, there might be 2 integer values.
- * eg. VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT
- * values[0] : primitives written, values[1]: primitives generated
- */
-struct PACKED slot_value {
- uint64_t values[2];
+struct PACKED query_slot {
+ uint64_t available;
};
-struct PACKED query_slot {
- struct slot_value available; /* 0 when unavailable, 1 when available */
- struct slot_value result;
+struct PACKED occlusion_slot_value {
+ /* Seems sample counters are placed to be 16-byte aligned
+ * even though this query needs an 8-byte slot. */
+ uint64_t value;
+ uint64_t _padding;
};
struct PACKED occlusion_query_slot {
struct query_slot common;
- struct slot_value begin;
- struct slot_value end;
+ uint64_t result;
+
+ struct occlusion_slot_value begin;
+ struct occlusion_slot_value end;
+};
+
+struct PACKED timestamp_query_slot {
+ struct query_slot common;
+ uint64_t result;
+};
+
+struct PACKED primitive_slot_value {
+ uint64_t values[2];
};
-/* The result of transform feedback queries is two integer values:
- * common.result.values[0] is the count of primitives written,
- * common.result.values[1] is the count of primitives generated.
- * Also a result for each stream is stored at 4 slots respectively.
- */
struct PACKED primitive_query_slot {
struct query_slot common;
- struct slot_value begin[4];
- struct slot_value end[4];
+ /* The result of transform feedback queries is two integer values:
+ * results[0] is the count of primitives written,
+ * results[1] is the count of primitives generated.
+ * Also a result for each stream is stored at 4 slots respectively.
+ */
+ uint64_t results[2];
+
+ /* Primitive counters also need to be 16-byte aligned. */
+ uint64_t _padding;
+
+ struct primitive_slot_value begin[4];
+ struct primitive_slot_value end[4];
};
/* Returns the IOVA of a given uint64_t field in a given slot of a query
* pool. */
-#define query_iova(type, pool, query, field, value_index) \
- pool->bo.iova + pool->stride * query + offsetof(type, field) + \
- offsetof(struct slot_value, values[value_index])
+#define query_iova(type, pool, query, field) \
+ pool->bo.iova + pool->stride * (query) + offsetof(type, field)
#define occlusion_query_iova(pool, query, field) \
- query_iova(struct occlusion_query_slot, pool, query, field, 0)
+ query_iova(struct occlusion_query_slot, pool, query, field)
#define primitive_query_iova(pool, query, field, i) \
- query_iova(struct primitive_query_slot, pool, query, field, i)
+ query_iova(struct primitive_query_slot, pool, query, field) + \
+ offsetof(struct primitive_slot_value, values[i])
#define query_available_iova(pool, query) \
- query_iova(struct query_slot, pool, query, available, 0)
+ query_iova(struct query_slot, pool, query, available)
#define query_result_iova(pool, query, i) \
- query_iova(struct query_slot, pool, query, result, i)
+ pool->bo.iova + pool->stride * (query) + \
+   sizeof(struct query_slot) + sizeof(uint64_t) * (i)
+
+#define query_result_addr(pool, query, i) \
+   pool->bo.map + pool->stride * (query) + \
+   sizeof(struct query_slot) + sizeof(uint64_t) * (i)
-#define query_is_available(slot) slot->available.values[0]
+#define query_is_available(slot) slot->available
/*
* Returns a pointer to a given slot in a query pool.
slot_size = sizeof(struct occlusion_query_slot);
break;
case VK_QUERY_TYPE_TIMESTAMP:
- slot_size = sizeof(struct query_slot);
+ slot_size = sizeof(struct timestamp_query_slot);
break;
case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
slot_size = sizeof(struct primitive_query_slot);
}
for (uint32_t k = 0; k < result_count; k++) {
- if (available)
- write_query_value_cpu(result_base, k, slot->result.values[k], flags);
- else if (flags & VK_QUERY_RESULT_PARTIAL_BIT)
+ if (available) {
+ uint64_t *result = query_result_addr(pool, query, k);
+ write_query_value_cpu(result_base, k, *result, flags);
+ } else if (flags & VK_QUERY_RESULT_PARTIAL_BIT)
/* From the Vulkan 1.1.130 spec:
*
* If VK_QUERY_RESULT_PARTIAL_BIT is set, VK_QUERY_RESULT_WAIT_BIT
tu_cs_emit_qw(cs, query_available_iova(pool, query));
tu_cs_emit_qw(cs, 0x0);
- tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
- tu_cs_emit_qw(cs, query_result_iova(pool, query, 0));
- tu_cs_emit_qw(cs, 0x0);
- tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
- tu_cs_emit_qw(cs, query_result_iova(pool, query, 1));
- tu_cs_emit_qw(cs, 0x0);
+ for (uint32_t k = 0; k < get_result_count(pool); k++) {
+ tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
+ tu_cs_emit_qw(cs, query_result_iova(pool, query, k));
+ tu_cs_emit_qw(cs, 0x0);
+ }
}
}
tu_cs_emit_qw(cs, 0x1);
}
+/* Implement this bit of spec text from section 17.2 "Query Operation":
+ *
+ * If queries are used while executing a render pass instance that has
+ * multiview enabled, the query uses N consecutive query indices in the
+ * query pool (starting at query) where N is the number of bits set in the
+ * view mask in the subpass the query is used in. How the numerical
+ * results of the query are distributed among the queries is
+ * implementation-dependent. For example, some implementations may write
+ * each view’s results to a distinct query, while other implementations
+ * may write the total result to the first query and write zero to the
+ * other queries. However, the sum of the results in all the queries must
+ * accurately reflect the total result of the query summed over all views.
+ * Applications can sum the results from all the queries to compute the
+ * total result.
+ *
+ * Since we execute all views at once, we write zero to the other queries.
+ * Furthermore, because queries must be reset before use, and we set the
+ * result to 0 in vkCmdResetQueryPool(), we just need to mark it as available.
+ */
+
+static void
+handle_multiview_queries(struct tu_cmd_buffer *cmd,
+ struct tu_query_pool *pool,
+ uint32_t query)
+{
+ if (!cmd->state.pass || !cmd->state.subpass->multiview_mask)
+ return;
+
+ unsigned views = util_bitcount(cmd->state.subpass->multiview_mask);
+ struct tu_cs *cs = &cmd->draw_epilogue_cs;
+
+ for (uint32_t i = 1; i < views; i++) {
+ tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
+ tu_cs_emit_qw(cs, query_available_iova(pool, query + i));
+ tu_cs_emit_qw(cs, 0x1);
+ }
+}
+
void
tu_CmdEndQuery(VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
assert(!"Invalid query type");
}
+ handle_multiview_queries(cmdbuf, pool, query);
+
tu_bo_list_add(&cmdbuf->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);
}
{
TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
- struct tu_cs *cs = cmd->state.pass ? &cmd->draw_epilogue_cs : &cmd->cs;
- /* WFI to get more accurate timestamp */
- tu_cs_emit_wfi(cs);
+ tu_bo_list_add(&cmd->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);
+
+ /* Inside a render pass, just write the timestamp multiple times so that
+ * the user gets the last one if we use GMEM. There isn't really much
+ * better we can do, and this seems to be what the blob does too.
+ */
+ struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
+
+ /* Stages that will already have been executed by the time the CP executes
+ * the REG_TO_MEM. DrawIndirect parameters are read by the CP, so the draw
+ * indirect stage counts as top-of-pipe too.
+ */
+ VkPipelineStageFlags top_of_pipe_flags =
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+
+ if (pipelineStage & ~top_of_pipe_flags) {
+ /* Execute a WFI so that all commands complete. Note that CP_REG_TO_MEM
+ * does CP_WAIT_FOR_ME internally, which will wait for the WFI to
+ * complete.
+ *
+ * Stalling the CP like this is really unfortunate, but I don't think
+ * there's a better solution that allows all 48 bits of precision
+ * because CP_EVENT_WRITE doesn't support 64-bit timestamps.
+ */
+ tu_cs_emit_wfi(cs);
+ }
tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_ALWAYS_ON_COUNTER_LO) |
CP_REG_TO_MEM_0_64B);
tu_cs_emit_qw(cs, query_result_iova(pool, query, 0));
+ /* Only flag availability once the entire renderpass is done, similar to
+ * the begin/end path.
+ */
+ cs = cmd->state.pass ? &cmd->draw_epilogue_cs : &cmd->cs;
+
tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
tu_cs_emit_qw(cs, query_available_iova(pool, query));
tu_cs_emit_qw(cs, 0x1);
- if (cmd->state.pass) {
- /* TODO: to have useful in-renderpass timestamps:
- * for sysmem path, we can just emit the timestamp in draw_cs,
- * for gmem renderpass, we do something with accumulate,
- * but I'm not sure that would follow the spec
- */
- tu_finishme("CmdWriteTimestam in renderpass not accurate");
- }
+ /* From the spec for vkCmdWriteTimestamp:
+ *
+ * If vkCmdWriteTimestamp is called while executing a render pass
+ * instance that has multiview enabled, the timestamp uses N consecutive
+ * query indices in the query pool (starting at query) where N is the
+ * number of bits set in the view mask of the subpass the command is
+ * executed in. The resulting query values are determined by an
+ * implementation-dependent choice of one of the following behaviors:
+ *
+ * - The first query is a timestamp value and (if more than one bit is
+ * set in the view mask) zero is written to the remaining queries.
+ * If two timestamps are written in the same subpass, the sum of the
+ * execution time of all views between those commands is the
+ * difference between the first query written by each command.
+ *
+ * - All N queries are timestamp values. If two timestamps are written
+ * in the same subpass, the sum of the execution time of all views
+ * between those commands is the sum of the difference between
+ * corresponding queries written by each command. The difference
+ * between corresponding queries may be the execution time of a
+ * single view.
+ *
+ * We execute all views in the same draw call, so we implement the first
+ * option, the same as regular queries.
+ */
+ handle_multiview_queries(cmd, pool, query);
}