2 * Copyright 2016 Red Hat Inc.
4 * Copyright © 2015 Intel Corporation
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
26 #include "tu_private.h"
34 #include "registers/adreno_pm4.xml.h"
35 #include "registers/adreno_common.xml.h"
36 #include "registers/a6xx.xml.h"
38 #include "nir/nir_builder.h"
39 #include "util/os_time.h"
43 #define NSEC_PER_SEC 1000000000ull
44 #define WAIT_TIMEOUT 5
46 /* It seems like sample counts need to be copied over to 16-byte aligned
48 struct PACKED slot_value
{
53 struct PACKED occlusion_query_slot
{
54 struct slot_value available
; /* 0 when unavailable, 1 when available */
55 struct slot_value begin
;
56 struct slot_value end
;
57 struct slot_value result
;
/* Returns the IOVA of a given uint64_t field in a given slot of a query
 * pool.
 *
 * Every argument and the whole expansion are parenthesized so the macro
 * composes safely inside larger expressions (e.g. an argument like
 * `firstQuery + i` must not re-associate with the multiplication).
 */
#define query_iova(type, pool, query, field)                                 \
   ((pool)->bo.iova + (pool)->stride * (query) + offsetof(type, field) +     \
    offsetof(struct slot_value, value))

#define occlusion_query_iova(pool, query, field)                             \
   query_iova(struct occlusion_query_slot, pool, query, field)

/* Reads the availability flag from a CPU-mapped slot:
 * 0 when unavailable, 1 when available. */
#define query_is_available(type, slot)                                       \
   (((type *)(slot))->available.value)

#define occlusion_query_is_available(slot)                                   \
   query_is_available(struct occlusion_query_slot, slot)
76 * Returns a pointer to a given slot in a query pool.
78 static void* slot_address(struct tu_query_pool
*pool
, uint32_t query
)
80 return (char*)pool
->bo
.map
+ query
* pool
->stride
;
84 tu_CreateQueryPool(VkDevice _device
,
85 const VkQueryPoolCreateInfo
*pCreateInfo
,
86 const VkAllocationCallbacks
*pAllocator
,
87 VkQueryPool
*pQueryPool
)
89 TU_FROM_HANDLE(tu_device
, device
, _device
);
90 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO
);
91 assert(pCreateInfo
->queryCount
> 0);
94 switch (pCreateInfo
->queryType
) {
95 case VK_QUERY_TYPE_OCCLUSION
:
96 slot_size
= sizeof(struct occlusion_query_slot
);
98 case VK_QUERY_TYPE_PIPELINE_STATISTICS
:
99 case VK_QUERY_TYPE_TIMESTAMP
:
100 unreachable("Unimplemented query type");
102 assert(!"Invalid query type");
105 struct tu_query_pool
*pool
=
106 vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*pool
), 8,
107 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
110 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
112 VkResult result
= tu_bo_init_new(device
, &pool
->bo
,
113 pCreateInfo
->queryCount
* slot_size
);
114 if (result
!= VK_SUCCESS
) {
115 vk_free2(&device
->alloc
, pAllocator
, pool
);
119 result
= tu_bo_map(device
, &pool
->bo
);
120 if (result
!= VK_SUCCESS
) {
121 tu_bo_finish(device
, &pool
->bo
);
122 vk_free2(&device
->alloc
, pAllocator
, pool
);
126 /* Initialize all query statuses to unavailable */
127 memset(pool
->bo
.map
, 0, pool
->bo
.size
);
129 pool
->type
= pCreateInfo
->queryType
;
130 pool
->stride
= slot_size
;
131 pool
->size
= pCreateInfo
->queryCount
;
132 pool
->pipeline_statistics
= pCreateInfo
->pipelineStatistics
;
133 *pQueryPool
= tu_query_pool_to_handle(pool
);
139 tu_DestroyQueryPool(VkDevice _device
,
141 const VkAllocationCallbacks
*pAllocator
)
143 TU_FROM_HANDLE(tu_device
, device
, _device
);
144 TU_FROM_HANDLE(tu_query_pool
, pool
, _pool
);
149 tu_bo_finish(device
, &pool
->bo
);
150 vk_free2(&device
->alloc
, pAllocator
, pool
);
153 /* Wait on the the availability status of a query up until a timeout. */
155 wait_for_available(struct tu_device
*device
, struct tu_query_pool
*pool
,
158 /* TODO: Use the MSM_IOVA_WAIT ioctl to wait on the available bit in a
159 * scheduler friendly way instead of busy polling once the patch has landed
161 struct occlusion_query_slot
*slot
= slot_address(pool
, query
);
162 uint64_t abs_timeout
= os_time_get_absolute_timeout(
163 WAIT_TIMEOUT
* NSEC_PER_SEC
);
164 while(os_time_get_nano() < abs_timeout
) {
165 if (occlusion_query_is_available(slot
))
168 return vk_error(device
->instance
, VK_TIMEOUT
);
172 get_occlusion_query_pool_results(struct tu_device
*device
,
173 struct tu_query_pool
*pool
,
179 VkQueryResultFlags flags
)
181 assert(dataSize
>= stride
* queryCount
);
183 char *query_result
= pData
;
184 VkResult result
= VK_SUCCESS
;
185 for (uint32_t i
= 0; i
< queryCount
; i
++) {
186 uint32_t query
= firstQuery
+ i
;
187 struct occlusion_query_slot
*slot
= slot_address(pool
, query
);
188 bool available
= occlusion_query_is_available(slot
);
189 if ((flags
& VK_QUERY_RESULT_WAIT_BIT
) && !available
) {
190 VkResult wait_result
= wait_for_available(device
, pool
, query
);
191 if (wait_result
!= VK_SUCCESS
)
194 } else if (!(flags
& VK_QUERY_RESULT_PARTIAL_BIT
) && !available
) {
195 /* From the Vulkan 1.1.130 spec:
197 * If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are
198 * both not set then no result values are written to pData for
199 * queries that are in the unavailable state at the time of the
200 * call, and vkGetQueryPoolResults returns VK_NOT_READY. However,
201 * availability state is still written to pData for those queries
202 * if VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set.
204 result
= VK_NOT_READY
;
205 if (!(flags
& VK_QUERY_RESULT_WITH_AVAILABILITY_BIT
)) {
206 query_result
+= stride
;
213 value
= slot
->result
.value
;
214 } else if (flags
& VK_QUERY_RESULT_WITH_AVAILABILITY_BIT
) {
215 /* From the Vulkan 1.1.130 spec:
217 * If VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set, the final
218 * integer value written for each query is non-zero if the query’s
219 * status was available or zero if the status was unavailable.
222 } else if (flags
& VK_QUERY_RESULT_PARTIAL_BIT
) {
223 /* From the Vulkan 1.1.130 spec:
225 * If VK_QUERY_RESULT_PARTIAL_BIT is set, VK_QUERY_RESULT_WAIT_BIT
226 * is not set, and the query’s status is unavailable, an
227 * intermediate result value between zero and the final result
228 * value is written to pData for that query.
230 * Just return 0 here for simplicity since it's a valid result.
235 if (flags
& VK_QUERY_RESULT_64_BIT
) {
236 *(uint64_t*)query_result
= value
;
238 *(uint32_t*)query_result
= value
;
240 query_result
+= stride
;
246 tu_GetQueryPoolResults(VkDevice _device
,
247 VkQueryPool queryPool
,
253 VkQueryResultFlags flags
)
255 TU_FROM_HANDLE(tu_device
, device
, _device
);
256 TU_FROM_HANDLE(tu_query_pool
, pool
, queryPool
);
257 assert(firstQuery
+ queryCount
<= pool
->size
);
259 switch (pool
->type
) {
260 case VK_QUERY_TYPE_OCCLUSION
: {
261 return get_occlusion_query_pool_results(device
, pool
, firstQuery
,
262 queryCount
, dataSize
, pData
, stride
, flags
);
264 case VK_QUERY_TYPE_PIPELINE_STATISTICS
:
265 case VK_QUERY_TYPE_TIMESTAMP
:
266 unreachable("Unimplemented query type");
268 assert(!"Invalid query type");
274 emit_copy_occlusion_query_pool_results(struct tu_cmd_buffer
*cmdbuf
,
276 struct tu_query_pool
*pool
,
279 struct tu_buffer
*buffer
,
280 VkDeviceSize dstOffset
,
282 VkQueryResultFlags flags
)
284 /* From the Vulkan 1.1.130 spec:
286 * vkCmdCopyQueryPoolResults is guaranteed to see the effect of previous
287 * uses of vkCmdResetQueryPool in the same queue, without any additional
290 * To ensure that previous writes to the available bit are coherent, first
291 * wait for all writes to complete.
293 tu_cs_reserve_space(cmdbuf
->device
, cs
, 1);
294 tu_cs_emit_pkt7(cs
, CP_WAIT_MEM_WRITES
, 0);
296 for (uint32_t i
= 0; i
< queryCount
; i
++) {
297 uint32_t query
= firstQuery
+ i
;
298 uint64_t available_iova
= occlusion_query_iova(pool
, query
, available
);
299 uint64_t result_iova
= occlusion_query_iova(pool
, query
, result
);
300 uint64_t buffer_iova
= tu_buffer_iova(buffer
) + dstOffset
+ i
* stride
;
301 /* Wait for the available bit to be set if executed with the
302 * VK_QUERY_RESULT_WAIT_BIT flag. */
303 if (flags
& VK_QUERY_RESULT_WAIT_BIT
) {
304 tu_cs_reserve_space(cmdbuf
->device
, cs
, 7);
305 tu_cs_emit_pkt7(cs
, CP_WAIT_REG_MEM
, 6);
306 tu_cs_emit(cs
, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ
) |
307 CP_WAIT_REG_MEM_0_POLL_MEMORY
);
308 tu_cs_emit_qw(cs
, available_iova
);
309 tu_cs_emit(cs
, CP_WAIT_REG_MEM_3_REF(0x1));
310 tu_cs_emit(cs
, CP_WAIT_REG_MEM_4_MASK(~0));
311 tu_cs_emit(cs
, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
314 /* If the query result is available, conditionally emit a packet to copy
315 * the result (bo->result) into the buffer.
317 * NOTE: For the conditional packet to be executed, CP_COND_EXEC tests
318 * that ADDR0 != 0 and ADDR1 < REF. The packet here simply tests that
319 * 0 < available < 2, aka available == 1.
321 tu_cs_reserve_space(cmdbuf
->device
, cs
, 13);
322 tu_cs_emit_pkt7(cs
, CP_COND_EXEC
, 6);
323 tu_cs_emit_qw(cs
, available_iova
);
324 tu_cs_emit_qw(cs
, available_iova
);
325 tu_cs_emit(cs
, CP_COND_EXEC_4_REF(0x2));
326 tu_cs_emit(cs
, 6); /* Conditionally execute the next 6 DWORDS */
328 /* Start of conditional execution */
329 tu_cs_emit_pkt7(cs
, CP_MEM_TO_MEM
, 5);
330 uint32_t mem_to_mem_flags
= flags
& VK_QUERY_RESULT_64_BIT
?
331 CP_MEM_TO_MEM_0_DOUBLE
: 0;
332 tu_cs_emit(cs
, mem_to_mem_flags
);
333 tu_cs_emit_qw(cs
, buffer_iova
);
334 tu_cs_emit_qw(cs
, result_iova
);
335 /* End of conditional execution */
337 /* Like in the case of vkGetQueryPoolResults, copying the results of an
338 * unavailable query with the VK_QUERY_RESULT_WITH_AVAILABILITY_BIT or
339 * VK_QUERY_RESULT_PARTIAL_BIT flags will return 0. */
340 if (flags
& (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT
|
341 VK_QUERY_RESULT_PARTIAL_BIT
)) {
342 if (flags
& VK_QUERY_RESULT_64_BIT
) {
343 tu_cs_reserve_space(cmdbuf
->device
, cs
, 10);
344 tu_cs_emit_pkt7(cs
, CP_COND_WRITE5
, 9);
346 tu_cs_reserve_space(cmdbuf
->device
, cs
, 9);
347 tu_cs_emit_pkt7(cs
, CP_COND_WRITE5
, 8);
349 tu_cs_emit(cs
, CP_COND_WRITE5_0_FUNCTION(WRITE_EQ
) |
350 CP_COND_WRITE5_0_POLL_MEMORY
|
351 CP_COND_WRITE5_0_WRITE_MEMORY
);
352 tu_cs_emit_qw(cs
, available_iova
);
353 tu_cs_emit(cs
, CP_COND_WRITE5_3_REF(0));
354 tu_cs_emit(cs
, CP_COND_WRITE5_4_MASK(~0));
355 tu_cs_emit_qw(cs
, buffer_iova
);
356 if (flags
& VK_QUERY_RESULT_64_BIT
) {
357 tu_cs_emit_qw(cs
, 0);
364 tu_bo_list_add(&cmdbuf
->bo_list
, buffer
->bo
, MSM_SUBMIT_BO_WRITE
);
368 tu_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer
,
369 VkQueryPool queryPool
,
373 VkDeviceSize dstOffset
,
375 VkQueryResultFlags flags
)
377 TU_FROM_HANDLE(tu_cmd_buffer
, cmdbuf
, commandBuffer
);
378 TU_FROM_HANDLE(tu_query_pool
, pool
, queryPool
);
379 TU_FROM_HANDLE(tu_buffer
, buffer
, dstBuffer
);
380 struct tu_cs
*cs
= &cmdbuf
->cs
;
381 assert(firstQuery
+ queryCount
<= pool
->size
);
383 switch (pool
->type
) {
384 case VK_QUERY_TYPE_OCCLUSION
: {
385 return emit_copy_occlusion_query_pool_results(cmdbuf
, cs
, pool
,
386 firstQuery
, queryCount
, buffer
, dstOffset
, stride
, flags
);
388 case VK_QUERY_TYPE_PIPELINE_STATISTICS
:
389 case VK_QUERY_TYPE_TIMESTAMP
:
390 unreachable("Unimplemented query type");
392 assert(!"Invalid query type");
397 emit_reset_occlusion_query_pool(struct tu_cmd_buffer
*cmdbuf
,
398 struct tu_query_pool
*pool
,
402 struct tu_cs
*cs
= &cmdbuf
->cs
;
404 for (uint32_t i
= 0; i
< queryCount
; i
++) {
405 uint32_t query
= firstQuery
+ i
;
406 uint64_t available_iova
= occlusion_query_iova(pool
, query
, available
);
407 uint64_t result_iova
= occlusion_query_iova(pool
, query
, result
);
408 tu_cs_reserve_space(cmdbuf
->device
, cs
, 11);
409 tu_cs_emit_pkt7(cs
, CP_MEM_WRITE
, 4);
410 tu_cs_emit_qw(cs
, available_iova
);
411 tu_cs_emit_qw(cs
, 0x0);
413 tu_cs_emit_pkt7(cs
, CP_MEM_WRITE
, 4);
414 tu_cs_emit_qw(cs
, result_iova
);
415 tu_cs_emit_qw(cs
, 0x0);
420 tu_CmdResetQueryPool(VkCommandBuffer commandBuffer
,
421 VkQueryPool queryPool
,
425 TU_FROM_HANDLE(tu_cmd_buffer
, cmdbuf
, commandBuffer
);
426 TU_FROM_HANDLE(tu_query_pool
, pool
, queryPool
);
428 switch (pool
->type
) {
429 case VK_QUERY_TYPE_OCCLUSION
:
430 emit_reset_occlusion_query_pool(cmdbuf
, pool
, firstQuery
, queryCount
);
432 case VK_QUERY_TYPE_PIPELINE_STATISTICS
:
433 case VK_QUERY_TYPE_TIMESTAMP
:
434 unreachable("Unimplemented query type");
436 assert(!"Invalid query type");
439 tu_bo_list_add(&cmdbuf
->bo_list
, &pool
->bo
, MSM_SUBMIT_BO_WRITE
);
443 emit_begin_occlusion_query(struct tu_cmd_buffer
*cmdbuf
,
444 struct tu_query_pool
*pool
,
447 /* From the Vulkan 1.1.130 spec:
449 * A query must begin and end inside the same subpass of a render pass
450 * instance, or must both begin and end outside of a render pass
453 * Unlike on an immediate-mode renderer, Turnip renders all tiles on
454 * vkCmdEndRenderPass, not individually on each vkCmdDraw*. As such, if a
455 * query begins/ends inside the same subpass of a render pass, we need to
456 * record the packets on the secondary draw command stream. cmdbuf->draw_cs
457 * is then run on every tile during render, so we just need to accumulate
458 * sample counts in slot->result to compute the query result.
460 struct tu_cs
*cs
= cmdbuf
->state
.pass
? &cmdbuf
->draw_cs
: &cmdbuf
->cs
;
462 uint64_t begin_iova
= occlusion_query_iova(pool
, query
, begin
);
464 tu_cs_reserve_space(cmdbuf
->device
, cs
, 7);
466 A6XX_RB_SAMPLE_COUNT_CONTROL(.copy
= true));
469 A6XX_RB_SAMPLE_COUNT_ADDR_LO(begin_iova
));
471 tu_cs_emit_pkt7(cs
, CP_EVENT_WRITE
, 1);
472 tu_cs_emit(cs
, ZPASS_DONE
);
476 tu_CmdBeginQuery(VkCommandBuffer commandBuffer
,
477 VkQueryPool queryPool
,
479 VkQueryControlFlags flags
)
481 TU_FROM_HANDLE(tu_cmd_buffer
, cmdbuf
, commandBuffer
);
482 TU_FROM_HANDLE(tu_query_pool
, pool
, queryPool
);
483 assert(query
< pool
->size
);
485 switch (pool
->type
) {
486 case VK_QUERY_TYPE_OCCLUSION
:
487 /* In freedreno, there is no implementation difference between
488 * GL_SAMPLES_PASSED and GL_ANY_SAMPLES_PASSED, so we can similarly
489 * ignore the VK_QUERY_CONTROL_PRECISE_BIT flag here.
491 emit_begin_occlusion_query(cmdbuf
, pool
, query
);
493 case VK_QUERY_TYPE_PIPELINE_STATISTICS
:
494 case VK_QUERY_TYPE_TIMESTAMP
:
495 unreachable("Unimplemented query type");
497 assert(!"Invalid query type");
500 tu_bo_list_add(&cmdbuf
->bo_list
, &pool
->bo
, MSM_SUBMIT_BO_WRITE
);
504 emit_end_occlusion_query(struct tu_cmd_buffer
*cmdbuf
,
505 struct tu_query_pool
*pool
,
508 /* Ending an occlusion query happens in a few steps:
509 * 1) Set the slot->end to UINT64_MAX.
510 * 2) Set up the SAMPLE_COUNT registers and trigger a CP_EVENT_WRITE to
511 * write the current sample count value into slot->end.
512 * 3) Since (2) is asynchronous, wait until slot->end is not equal to
513 * UINT64_MAX before continuing via CP_WAIT_REG_MEM.
514 * 4) Accumulate the results of the query (slot->end - slot->begin) into
516 * 5) If vkCmdEndQuery is *not* called from within the scope of a render
517 * pass, set the slot's available bit since the query is now done.
518 * 6) If vkCmdEndQuery *is* called from within the scope of a render
519 * pass, we cannot mark as available yet since the commands in
520 * draw_cs are not run until vkCmdEndRenderPass.
522 const struct tu_render_pass
*pass
= cmdbuf
->state
.pass
;
523 struct tu_cs
*cs
= pass
? &cmdbuf
->draw_cs
: &cmdbuf
->cs
;
525 uint64_t available_iova
= occlusion_query_iova(pool
, query
, available
);
526 uint64_t begin_iova
= occlusion_query_iova(pool
, query
, begin
);
527 uint64_t end_iova
= occlusion_query_iova(pool
, query
, end
);
528 uint64_t result_iova
= occlusion_query_iova(pool
, query
, result
);
529 tu_cs_reserve_space(cmdbuf
->device
, cs
, 31);
530 tu_cs_emit_pkt7(cs
, CP_MEM_WRITE
, 4);
531 tu_cs_emit_qw(cs
, end_iova
);
532 tu_cs_emit_qw(cs
, 0xffffffffffffffffull
);
534 tu_cs_emit_pkt7(cs
, CP_WAIT_MEM_WRITES
, 0);
537 A6XX_RB_SAMPLE_COUNT_CONTROL(.copy
= true));
540 A6XX_RB_SAMPLE_COUNT_ADDR_LO(end_iova
));
542 tu_cs_emit_pkt7(cs
, CP_EVENT_WRITE
, 1);
543 tu_cs_emit(cs
, ZPASS_DONE
);
545 tu_cs_emit_pkt7(cs
, CP_WAIT_REG_MEM
, 6);
546 tu_cs_emit(cs
, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_NE
) |
547 CP_WAIT_REG_MEM_0_POLL_MEMORY
);
548 tu_cs_emit_qw(cs
, end_iova
);
549 tu_cs_emit(cs
, CP_WAIT_REG_MEM_3_REF(0xffffffff));
550 tu_cs_emit(cs
, CP_WAIT_REG_MEM_4_MASK(~0));
551 tu_cs_emit(cs
, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
553 /* result (dst) = result (srcA) + end (srcB) - begin (srcC) */
554 tu_cs_emit_pkt7(cs
, CP_MEM_TO_MEM
, 9);
555 tu_cs_emit(cs
, CP_MEM_TO_MEM_0_DOUBLE
| CP_MEM_TO_MEM_0_NEG_C
);
556 tu_cs_emit_qw(cs
, result_iova
);
557 tu_cs_emit_qw(cs
, result_iova
);
558 tu_cs_emit_qw(cs
, end_iova
);
559 tu_cs_emit_qw(cs
, begin_iova
);
561 tu_cs_emit_pkt7(cs
, CP_WAIT_MEM_WRITES
, 0);
564 /* Technically, queries should be tracked per-subpass, but here we track
565 * at the render pass level to simply the code a bit. This is safe
566 * because the only commands that use the available bit are
567 * vkCmdCopyQueryPoolResults and vkCmdResetQueryPool, both of which
568 * cannot be invoked from inside a render pass scope.
570 cs
= &cmdbuf
->draw_epilogue_cs
;
572 tu_cs_reserve_space(cmdbuf
->device
, cs
, 5);
573 tu_cs_emit_pkt7(cs
, CP_MEM_WRITE
, 4);
574 tu_cs_emit_qw(cs
, available_iova
);
575 tu_cs_emit_qw(cs
, 0x1);
579 tu_CmdEndQuery(VkCommandBuffer commandBuffer
,
580 VkQueryPool queryPool
,
583 TU_FROM_HANDLE(tu_cmd_buffer
, cmdbuf
, commandBuffer
);
584 TU_FROM_HANDLE(tu_query_pool
, pool
, queryPool
);
585 assert(query
< pool
->size
);
587 switch (pool
->type
) {
588 case VK_QUERY_TYPE_OCCLUSION
:
589 emit_end_occlusion_query(cmdbuf
, pool
, query
);
591 case VK_QUERY_TYPE_PIPELINE_STATISTICS
:
592 case VK_QUERY_TYPE_TIMESTAMP
:
593 unreachable("Unimplemented query type");
595 assert(!"Invalid query type");
598 tu_bo_list_add(&cmdbuf
->bo_list
, &pool
->bo
, MSM_SUBMIT_BO_WRITE
);
602 tu_CmdWriteTimestamp(VkCommandBuffer commandBuffer
,
603 VkPipelineStageFlagBits pipelineStage
,
604 VkQueryPool queryPool
,