src/freedreno/vulkan/tu_query.c
/*
 * Copyright 2016 Red Hat Inc.
 * Based on anv:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <assert.h>
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>

#include "adreno_pm4.xml.h"
#include "adreno_common.xml.h"
#include "a6xx.xml.h"

#include "nir/nir_builder.h"
#include "util/os_time.h"

#include "tu_cs.h"

#define NSEC_PER_SEC 1000000000ull
#define WAIT_TIMEOUT 5
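
/* Each RBBM_PRIMCTR_n counter is a LO/HI register pair, so the register
 * distance between counter 0 and counter 10 divided by two, plus one, gives
 * the number of pipeline statistics counters. */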
#define STAT_COUNT ((REG_A6XX_RBBM_PRIMCTR_10_LO - REG_A6XX_RBBM_PRIMCTR_0_LO) / 2 + 1)

struct PACKED query_slot {
   uint64_t available;
};

struct PACKED occlusion_slot_value {
   /* Sample counters seem to be placed so that they are 16-byte aligned,
    * even though this query only needs an 8-byte slot. */
   uint64_t value;
   uint64_t _padding;
};

struct PACKED occlusion_query_slot {
   struct query_slot common;
   uint64_t result;

   struct occlusion_slot_value begin;
   struct occlusion_slot_value end;
};

struct PACKED timestamp_query_slot {
   struct query_slot common;
   uint64_t result;
};

struct PACKED primitive_slot_value {
   uint64_t values[2];
};

struct PACKED pipeline_stat_query_slot {
   struct query_slot common;
   uint64_t results[STAT_COUNT];

   uint64_t begin[STAT_COUNT];
   uint64_t end[STAT_COUNT];
};

struct PACKED primitive_query_slot {
   struct query_slot common;
   /* The result of transform feedback queries is two integer values:
    * results[0] is the count of primitives written,
    * results[1] is the count of primitives generated.
    * In addition, begin/end counter values are stored per stream, in one
    * slot for each of the 4 streams.
    */
   uint64_t results[2];

   /* Primitive counters also need to be 16-byte aligned. */
   uint64_t _padding;

   struct primitive_slot_value begin[4];
   struct primitive_slot_value end[4];
};

/* Returns the IOVA of a given uint64_t field in a given slot of a query
 * pool. */
#define query_iova(type, pool, query, field)                     \
   pool->bo.iova + pool->stride * (query) + offsetof(type, field)

#define occlusion_query_iova(pool, query, field)                 \
   query_iova(struct occlusion_query_slot, pool, query, field)

#define pipeline_stat_query_iova(pool, query, field)             \
   pool->bo.iova + pool->stride * (query) +                      \
   offsetof(struct pipeline_stat_query_slot, field)

#define primitive_query_iova(pool, query, field, i)              \
   query_iova(struct primitive_query_slot, pool, query, field) + \
   offsetof(struct primitive_slot_value, values[i])

#define query_available_iova(pool, query)                        \
   query_iova(struct query_slot, pool, query, available)

#define query_result_iova(pool, query, i)                        \
   pool->bo.iova + pool->stride * (query) +                      \
   sizeof(struct query_slot) + sizeof(uint64_t) * (i)

#define query_result_addr(pool, query, i)                        \
   pool->bo.map + pool->stride * (query) +                       \
   sizeof(struct query_slot) + sizeof(uint64_t) * (i)

#define query_is_available(slot) slot->available

/*
 * Returns a pointer to a given slot in a query pool.
 */
static void* slot_address(struct tu_query_pool *pool, uint32_t query)
{
   return (char*)pool->bo.map + query * pool->stride;
}

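/* Creates a query pool backed by a single BO with one slot per query. Each
 * slot starts with an availability word, followed by the result value(s) and
 * any per-type begin/end snapshot space. All query statuses start out as
 * unavailable (zeroed). */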
VkResult
tu_CreateQueryPool(VkDevice _device,
                   const VkQueryPoolCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkQueryPool *pQueryPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
   assert(pCreateInfo->queryCount > 0);

   uint32_t slot_size;
   switch (pCreateInfo->queryType) {
   case VK_QUERY_TYPE_OCCLUSION:
      slot_size = sizeof(struct occlusion_query_slot);
      break;
   case VK_QUERY_TYPE_TIMESTAMP:
      slot_size = sizeof(struct timestamp_query_slot);
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      slot_size = sizeof(struct primitive_query_slot);
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      slot_size = sizeof(struct pipeline_stat_query_slot);
      break;
   default:
      assert(!"Invalid query type");
   }

   struct tu_query_pool *pool =
      vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
                      VK_OBJECT_TYPE_QUERY_POOL);
   if (!pool)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   VkResult result = tu_bo_init_new(device, &pool->bo,
                                    pCreateInfo->queryCount * slot_size);
   if (result != VK_SUCCESS) {
      vk_object_free(&device->vk, pAllocator, pool);
      return result;
   }

   result = tu_bo_map(device, &pool->bo);
   if (result != VK_SUCCESS) {
      tu_bo_finish(device, &pool->bo);
      vk_object_free(&device->vk, pAllocator, pool);
      return result;
   }

   /* Initialize all query statuses to unavailable */
   memset(pool->bo.map, 0, pool->bo.size);

   pool->type = pCreateInfo->queryType;
   pool->stride = slot_size;
   pool->size = pCreateInfo->queryCount;
   pool->pipeline_statistics = pCreateInfo->pipelineStatistics;
   *pQueryPool = tu_query_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyQueryPool(VkDevice _device,
                    VkQueryPool _pool,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_query_pool, pool, _pool);

   if (!pool)
      return;

   tu_bo_finish(device, &pool->bo);
   vk_object_free(&device->vk, pAllocator, pool);
}

static uint32_t
get_result_count(struct tu_query_pool *pool)
{
   switch (pool->type) {
   /* Occlusion and timestamp queries write one integer value */
   case VK_QUERY_TYPE_OCCLUSION:
   case VK_QUERY_TYPE_TIMESTAMP:
      return 1;
   /* Transform feedback queries write two integer values */
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      return 2;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      return util_bitcount(pool->pipeline_statistics);
   default:
      assert(!"Invalid query type");
      return 0;
   }
}

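/* Consumes the lowest set bit of *statistics and maps that
 * VkQueryPipelineStatisticFlagBits bit to the index of the corresponding
 * RBBM_PRIMCTR counter pair. Note that IA vertices and VS invocations share
 * counter 0, and that index 3 is skipped. */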
static uint32_t
statistics_index(uint32_t *statistics)
{
   uint32_t stat;
   stat = u_bit_scan(statistics);

   switch (1 << stat) {
   case VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT:
   case VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT:
      return 0;
   case VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT:
      return 1;
   case VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT:
      return 2;
   case VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT:
      return 4;
   case VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT:
      return 5;
   case VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT:
      return 6;
   case VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT:
      return 7;
   case VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT:
      return 8;
   case VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT:
      return 9;
   case VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT:
      return 10;
   default:
      return 0;
   }
}

/* Wait on the availability status of a query up until a timeout. */
static VkResult
wait_for_available(struct tu_device *device, struct tu_query_pool *pool,
                   uint32_t query)
{
   /* TODO: Use the MSM_IOVA_WAIT ioctl to wait on the available bit in a
    * scheduler-friendly way instead of busy-polling once the patch has
    * landed upstream. */
   struct query_slot *slot = slot_address(pool, query);
   uint64_t abs_timeout = os_time_get_absolute_timeout(
         WAIT_TIMEOUT * NSEC_PER_SEC);
   while (os_time_get_nano() < abs_timeout) {
      if (query_is_available(slot))
         return VK_SUCCESS;
   }
   return vk_error(device->instance, VK_TIMEOUT);
}

/* Writes a query value to a buffer from the CPU. */
static void
write_query_value_cpu(char* base,
                      uint32_t offset,
                      uint64_t value,
                      VkQueryResultFlags flags)
{
   if (flags & VK_QUERY_RESULT_64_BIT) {
      *(uint64_t*)(base + (offset * sizeof(uint64_t))) = value;
   } else {
      *(uint32_t*)(base + (offset * sizeof(uint32_t))) = value;
   }
}

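/* CPU path for vkGetQueryPoolResults: walks each requested slot, optionally
 * busy-waits on availability (VK_QUERY_RESULT_WAIT_BIT), and writes results
 * and availability to pData according to the spec rules quoted below. */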
static VkResult
get_query_pool_results(struct tu_device *device,
                       struct tu_query_pool *pool,
                       uint32_t firstQuery,
                       uint32_t queryCount,
                       size_t dataSize,
                       void *pData,
                       VkDeviceSize stride,
                       VkQueryResultFlags flags)
{
   assert(dataSize >= stride * queryCount);

   char *result_base = pData;
   VkResult result = VK_SUCCESS;
   for (uint32_t i = 0; i < queryCount; i++) {
      uint32_t query = firstQuery + i;
      struct query_slot *slot = slot_address(pool, query);
      bool available = query_is_available(slot);
      uint32_t result_count = get_result_count(pool);
      uint32_t statistics = pool->pipeline_statistics;

      if ((flags & VK_QUERY_RESULT_WAIT_BIT) && !available) {
         VkResult wait_result = wait_for_available(device, pool, query);
         if (wait_result != VK_SUCCESS)
            return wait_result;
         available = true;
      } else if (!(flags & VK_QUERY_RESULT_PARTIAL_BIT) && !available) {
         /* From the Vulkan 1.1.130 spec:
          *
          *    If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT
          *    are both not set then no result values are written to pData
          *    for queries that are in the unavailable state at the time of
          *    the call, and vkGetQueryPoolResults returns VK_NOT_READY.
          *    However, availability state is still written to pData for
          *    those queries if VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set.
          */
         result = VK_NOT_READY;
         if (!(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
            result_base += stride;
            continue;
         }
      }

      for (uint32_t k = 0; k < result_count; k++) {
         if (available) {
            uint64_t *result;

            if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
               uint32_t stat_idx = statistics_index(&statistics);
               result = query_result_addr(pool, query, stat_idx);
            } else {
               result = query_result_addr(pool, query, k);
            }

            write_query_value_cpu(result_base, k, *result, flags);
         } else if (flags & VK_QUERY_RESULT_PARTIAL_BIT)
            /* From the Vulkan 1.1.130 spec:
             *
             *    If VK_QUERY_RESULT_PARTIAL_BIT is set,
             *    VK_QUERY_RESULT_WAIT_BIT is not set, and the query's status
             *    is unavailable, an intermediate result value between zero
             *    and the final result value is written to pData for that
             *    query.
             *
             * Just return 0 here for simplicity since it's a valid result.
             */
            write_query_value_cpu(result_base, k, 0, flags);
      }

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
         /* From the Vulkan 1.1.130 spec:
          *
          *    If VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set, the final
          *    integer value written for each query is non-zero if the
          *    query's status was available or zero if the status was
          *    unavailable.
          */
         write_query_value_cpu(result_base, result_count, available, flags);

      result_base += stride;
   }
   return result;
}

VkResult
tu_GetQueryPoolResults(VkDevice _device,
                       VkQueryPool queryPool,
                       uint32_t firstQuery,
                       uint32_t queryCount,
                       size_t dataSize,
                       void *pData,
                       VkDeviceSize stride,
                       VkQueryResultFlags flags)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
   assert(firstQuery + queryCount <= pool->size);

   if (tu_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
   case VK_QUERY_TYPE_TIMESTAMP:
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      return get_query_pool_results(device, pool, firstQuery, queryCount,
                                    dataSize, pData, stride, flags);
   default:
      assert(!"Invalid query type");
   }
   return VK_SUCCESS;
}

/* Copies a query value from the query pool into a buffer, on the GPU. */
static void
copy_query_value_gpu(struct tu_cmd_buffer *cmdbuf,
                     struct tu_cs *cs,
                     uint64_t src_iova,
                     uint64_t base_write_iova,
                     uint32_t offset,
                     VkQueryResultFlags flags)
{
   uint32_t element_size = flags & VK_QUERY_RESULT_64_BIT ?
         sizeof(uint64_t) : sizeof(uint32_t);
   uint64_t write_iova = base_write_iova + (offset * element_size);

   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 5);
   uint32_t mem_to_mem_flags = flags & VK_QUERY_RESULT_64_BIT ?
         CP_MEM_TO_MEM_0_DOUBLE : 0;
   tu_cs_emit(cs, mem_to_mem_flags);
   tu_cs_emit_qw(cs, write_iova);
   tu_cs_emit_qw(cs, src_iova);
}

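/* GPU path for vkCmdCopyQueryPoolResults: emits CP packets that copy each
 * slot's result values (and optionally its availability word) into the
 * destination buffer, either waiting on or conditionally testing the
 * available bit depending on the flags. */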
static void
emit_copy_query_pool_results(struct tu_cmd_buffer *cmdbuf,
                             struct tu_cs *cs,
                             struct tu_query_pool *pool,
                             uint32_t firstQuery,
                             uint32_t queryCount,
                             struct tu_buffer *buffer,
                             VkDeviceSize dstOffset,
                             VkDeviceSize stride,
                             VkQueryResultFlags flags)
{
   /* From the Vulkan 1.1.130 spec:
    *
    *    vkCmdCopyQueryPoolResults is guaranteed to see the effect of
    *    previous uses of vkCmdResetQueryPool in the same queue, without any
    *    additional synchronization.
    *
    * To ensure that previous writes to the available bit are coherent, first
    * wait for all writes to complete.
    */
   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);

   for (uint32_t i = 0; i < queryCount; i++) {
      uint32_t query = firstQuery + i;
      uint64_t available_iova = query_available_iova(pool, query);
      uint64_t buffer_iova = tu_buffer_iova(buffer) + dstOffset + i * stride;
      uint32_t result_count = get_result_count(pool);
      uint32_t statistics = pool->pipeline_statistics;

      /* Wait for the available bit to be set if executed with the
       * VK_QUERY_RESULT_WAIT_BIT flag. */
      if (flags & VK_QUERY_RESULT_WAIT_BIT) {
         tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
         tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                        CP_WAIT_REG_MEM_0_POLL_MEMORY);
         tu_cs_emit_qw(cs, available_iova);
         tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(0x1));
         tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
         tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
      }

      for (uint32_t k = 0; k < result_count; k++) {
         uint64_t result_iova;

         if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
            uint32_t stat_idx = statistics_index(&statistics);
            result_iova = query_result_iova(pool, query, stat_idx);
         } else {
            result_iova = query_result_iova(pool, query, k);
         }

         if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
            /* Unconditionally copying the bo->result into the buffer here is
             * valid because we only set bo->result on vkCmdEndQuery. Thus,
             * even if the query is unavailable, this will copy the correct
             * partial value of 0.
             */
            copy_query_value_gpu(cmdbuf, cs, result_iova, buffer_iova,
                                 k /* offset */, flags);
         } else {
            /* Conditionally copy bo->result into the buffer based on whether
             * the query is available.
             *
             * NOTE: For the conditional packets to be executed, CP_COND_EXEC
             * tests that ADDR0 != 0 and ADDR1 < REF. The packet here simply
             * tests that 0 < available < 2, aka available == 1.
             */
            tu_cs_reserve(cs, 7 + 6);
            tu_cs_emit_pkt7(cs, CP_COND_EXEC, 6);
            tu_cs_emit_qw(cs, available_iova);
            tu_cs_emit_qw(cs, available_iova);
            tu_cs_emit(cs, CP_COND_EXEC_4_REF(0x2));
            tu_cs_emit(cs, 6); /* Cond execute the next 6 DWORDS */

            /* Start of conditional execution */
            copy_query_value_gpu(cmdbuf, cs, result_iova, buffer_iova,
                                 k /* offset */, flags);
            /* End of conditional execution */
         }
      }

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         copy_query_value_gpu(cmdbuf, cs, available_iova, buffer_iova,
                              result_count /* offset */, flags);
      }
   }

   tu_bo_list_add(&cmdbuf->bo_list, buffer->bo, MSM_SUBMIT_BO_WRITE);
}

void
tu_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
                           VkQueryPool queryPool,
                           uint32_t firstQuery,
                           uint32_t queryCount,
                           VkBuffer dstBuffer,
                           VkDeviceSize dstOffset,
                           VkDeviceSize stride,
                           VkQueryResultFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
   TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
   struct tu_cs *cs = &cmdbuf->cs;
   assert(firstQuery + queryCount <= pool->size);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
   case VK_QUERY_TYPE_TIMESTAMP:
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      emit_copy_query_pool_results(cmdbuf, cs, pool, firstQuery, queryCount,
                                   buffer, dstOffset, stride, flags);
      break;
   default:
      assert(!"Invalid query type");
   }
}

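/* Resets queries on the GPU timeline by writing zero to each slot's
 * available bit and result values with CP_MEM_WRITE. */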
static void
emit_reset_query_pool(struct tu_cmd_buffer *cmdbuf,
                      struct tu_query_pool *pool,
                      uint32_t firstQuery,
                      uint32_t queryCount)
{
   struct tu_cs *cs = &cmdbuf->cs;

   for (uint32_t i = 0; i < queryCount; i++) {
      uint32_t query = firstQuery + i;
      uint32_t statistics = pool->pipeline_statistics;

      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
      tu_cs_emit_qw(cs, query_available_iova(pool, query));
      tu_cs_emit_qw(cs, 0x0);

      for (uint32_t k = 0; k < get_result_count(pool); k++) {
         uint64_t result_iova;

         if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
            uint32_t stat_idx = statistics_index(&statistics);
            result_iova = query_result_iova(pool, query, stat_idx);
         } else {
            result_iova = query_result_iova(pool, query, k);
         }

         tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
         tu_cs_emit_qw(cs, result_iova);
         tu_cs_emit_qw(cs, 0x0);
      }
   }
}

void
tu_CmdResetQueryPool(VkCommandBuffer commandBuffer,
                     VkQueryPool queryPool,
                     uint32_t firstQuery,
                     uint32_t queryCount)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_TIMESTAMP:
   case VK_QUERY_TYPE_OCCLUSION:
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      emit_reset_query_pool(cmdbuf, pool, firstQuery, queryCount);
      break;
   default:
      assert(!"Invalid query type");
   }

   tu_bo_list_add(&cmdbuf->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);
}

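/* Host-side query reset (vkResetQueryPool, from VK_EXT_host_query_reset /
 * Vulkan 1.2): the pool BO is CPU-mapped, so just zero the available bit and
 * result values directly. */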
void
tu_ResetQueryPool(VkDevice device,
                  VkQueryPool queryPool,
                  uint32_t firstQuery,
                  uint32_t queryCount)
{
   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);

   for (uint32_t i = 0; i < queryCount; i++) {
      struct query_slot *slot = slot_address(pool, i + firstQuery);
      slot->available = 0;

      for (uint32_t k = 0; k < get_result_count(pool); k++) {
         uint64_t *res = query_result_addr(pool, i + firstQuery, k);
         *res = 0;
      }
   }
}

static void
emit_begin_occlusion_query(struct tu_cmd_buffer *cmdbuf,
                           struct tu_query_pool *pool,
                           uint32_t query)
{
   /* From the Vulkan 1.1.130 spec:
    *
    *    A query must begin and end inside the same subpass of a render pass
    *    instance, or must both begin and end outside of a render pass
    *    instance.
    *
    * Unlike on an immediate-mode renderer, Turnip renders all tiles on
    * vkCmdEndRenderPass, not individually on each vkCmdDraw*. As such, if a
    * query begins/ends inside the same subpass of a render pass, we need to
    * record the packets on the secondary draw command stream.
    * cmdbuf->draw_cs is then run on every tile during render, so we just
    * need to accumulate sample counts in slot->result to compute the query
    * result.
    */
   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;

   uint64_t begin_iova = occlusion_query_iova(pool, query, begin);

   tu_cs_emit_regs(cs,
                   A6XX_RB_SAMPLE_COUNT_CONTROL(.copy = true));

   tu_cs_emit_regs(cs,
                   A6XX_RB_SAMPLE_COUNT_ADDR_LO(begin_iova));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, ZPASS_DONE);
}

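/* Begins a pipeline statistics query by snapshotting all RBBM_PRIMCTR
 * counters into slot->begin with a CP_REG_TO_MEM after a WFI. */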
static void
emit_begin_stat_query(struct tu_cmd_buffer *cmdbuf,
                      struct tu_query_pool *pool,
                      uint32_t query)
{
   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
   uint64_t begin_iova = pipeline_stat_query_iova(pool, query, begin);

   tu6_emit_event_write(cmdbuf, cs, START_PRIMITIVE_CTRS);
   tu6_emit_event_write(cmdbuf, cs, RST_PIX_CNT);
   tu6_emit_event_write(cmdbuf, cs, TILE_FLUSH);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
   tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_RBBM_PRIMCTR_0_LO) |
                  CP_REG_TO_MEM_0_CNT(STAT_COUNT * 2) |
                  CP_REG_TO_MEM_0_64B);
   tu_cs_emit_qw(cs, begin_iova);
}

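/* Begins a transform feedback query. WRITE_PRIMITIVE_COUNTS appears to dump
 * the counters for all four streams starting at the programmed address,
 * which would be why begin[0] is used here regardless of stream_id. */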
static void
emit_begin_xfb_query(struct tu_cmd_buffer *cmdbuf,
                     struct tu_query_pool *pool,
                     uint32_t query,
                     uint32_t stream_id)
{
   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
   uint64_t begin_iova = primitive_query_iova(pool, query, begin[0], 0);

   tu_cs_emit_regs(cs, A6XX_VPC_SO_STREAM_COUNTS_LO(begin_iova));
   tu6_emit_event_write(cmdbuf, cs, WRITE_PRIMITIVE_COUNTS);
}

void
tu_CmdBeginQuery(VkCommandBuffer commandBuffer,
                 VkQueryPool queryPool,
                 uint32_t query,
                 VkQueryControlFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
   assert(query < pool->size);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      /* In freedreno, there is no implementation difference between
       * GL_SAMPLES_PASSED and GL_ANY_SAMPLES_PASSED, so we can similarly
       * ignore the VK_QUERY_CONTROL_PRECISE_BIT flag here.
       */
      emit_begin_occlusion_query(cmdbuf, pool, query);
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      emit_begin_xfb_query(cmdbuf, pool, query, 0);
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      emit_begin_stat_query(cmdbuf, pool, query);
      break;
   case VK_QUERY_TYPE_TIMESTAMP:
      unreachable("Unimplemented query type");
   default:
      assert(!"Invalid query type");
   }

   tu_bo_list_add(&cmdbuf->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);
}

void
tu_CmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer,
                           VkQueryPool queryPool,
                           uint32_t query,
                           VkQueryControlFlags flags,
                           uint32_t index)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
   assert(query < pool->size);

   switch (pool->type) {
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      emit_begin_xfb_query(cmdbuf, pool, query, index);
      break;
   default:
      assert(!"Invalid query type");
   }

   tu_bo_list_add(&cmdbuf->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);
}

static void
emit_end_occlusion_query(struct tu_cmd_buffer *cmdbuf,
                         struct tu_query_pool *pool,
                         uint32_t query)
{
   /* Ending an occlusion query happens in a few steps:
    *    1) Set the slot->end to UINT64_MAX.
    *    2) Set up the SAMPLE_COUNT registers and trigger a CP_EVENT_WRITE to
    *       write the current sample count value into slot->end.
    *    3) Since (2) is asynchronous, wait until slot->end is not equal to
    *       UINT64_MAX before continuing via CP_WAIT_REG_MEM.
    *    4) Accumulate the results of the query (slot->end - slot->begin)
    *       into slot->result.
    *    5) If vkCmdEndQuery is *not* called from within the scope of a
    *       render pass, set the slot's available bit since the query is now
    *       done.
    *    6) If vkCmdEndQuery *is* called from within the scope of a render
    *       pass, we cannot mark as available yet since the commands in
    *       draw_cs are not run until vkCmdEndRenderPass.
    */
   const struct tu_render_pass *pass = cmdbuf->state.pass;
   struct tu_cs *cs = pass ? &cmdbuf->draw_cs : &cmdbuf->cs;

   uint64_t available_iova = query_available_iova(pool, query);
   uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
   uint64_t end_iova = occlusion_query_iova(pool, query, end);
   uint64_t result_iova = query_result_iova(pool, query, 0);
   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
   tu_cs_emit_qw(cs, end_iova);
   tu_cs_emit_qw(cs, 0xffffffffffffffffull);

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);

   tu_cs_emit_regs(cs,
                   A6XX_RB_SAMPLE_COUNT_CONTROL(.copy = true));

   tu_cs_emit_regs(cs,
                   A6XX_RB_SAMPLE_COUNT_ADDR_LO(end_iova));

   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
   tu_cs_emit(cs, ZPASS_DONE);

   tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_NE) |
                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
   tu_cs_emit_qw(cs, end_iova);
   tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(0xffffffff));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
   tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   /* result (dst) = result (srcA) + end (srcB) - begin (srcC) */
   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
   tu_cs_emit(cs, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C);
   tu_cs_emit_qw(cs, result_iova);
   tu_cs_emit_qw(cs, result_iova);
   tu_cs_emit_qw(cs, end_iova);
   tu_cs_emit_qw(cs, begin_iova);

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);

   if (pass)
      /* Technically, queries should be tracked per-subpass, but here we
       * track at the render pass level to simplify the code a bit. This is
       * safe because the only commands that use the available bit are
       * vkCmdCopyQueryPoolResults and vkCmdResetQueryPool, both of which
       * cannot be invoked from inside a render pass scope.
       */
      cs = &cmdbuf->draw_epilogue_cs;

   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
   tu_cs_emit_qw(cs, available_iova);
   tu_cs_emit_qw(cs, 0x1);
}

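/* Ends a pipeline statistics query: snapshots the counters into slot->end,
 * accumulates (end[i] - begin[i]) into each result with CP_MEM_TO_MEM, and
 * then flags availability. */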
static void
emit_end_stat_query(struct tu_cmd_buffer *cmdbuf,
                    struct tu_query_pool *pool,
                    uint32_t query)
{
   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
   uint64_t end_iova = pipeline_stat_query_iova(pool, query, end);
   uint64_t available_iova = query_available_iova(pool, query);
   uint64_t result_iova;
   uint64_t stat_start_iova;
   uint64_t stat_stop_iova;

   tu6_emit_event_write(cmdbuf, cs, STOP_PRIMITIVE_CTRS);
   tu6_emit_event_write(cmdbuf, cs, RST_VTX_CNT);
   tu6_emit_event_write(cmdbuf, cs, STAT_EVENT);

   tu_cs_emit_wfi(cs);

   tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
   tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_RBBM_PRIMCTR_0_LO) |
                  CP_REG_TO_MEM_0_CNT(STAT_COUNT * 2) |
                  CP_REG_TO_MEM_0_64B);
   tu_cs_emit_qw(cs, end_iova);

   for (int i = 0; i < STAT_COUNT; i++) {
      result_iova = query_result_iova(pool, query, i);
      stat_start_iova = pipeline_stat_query_iova(pool, query, begin[i]);
      stat_stop_iova = pipeline_stat_query_iova(pool, query, end[i]);

      tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
      tu_cs_emit(cs, CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES |
                     CP_MEM_TO_MEM_0_DOUBLE |
                     CP_MEM_TO_MEM_0_NEG_C);

      tu_cs_emit_qw(cs, result_iova);
      tu_cs_emit_qw(cs, result_iova);
      tu_cs_emit_qw(cs, stat_stop_iova);
      tu_cs_emit_qw(cs, stat_start_iova);
   }

   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);

   if (cmdbuf->state.pass)
      cs = &cmdbuf->draw_epilogue_cs;

   /* Set the availability to 1 */
   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
   tu_cs_emit_qw(cs, available_iova);
   tu_cs_emit_qw(cs, 0x1);
}

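/* Ends a transform feedback query: snapshots the stream counters into
 * slot->end, then computes the primitives written/generated deltas
 * (end - begin) for the given stream with CP_MEM_TO_MEM and flags
 * availability. */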
static void
emit_end_xfb_query(struct tu_cmd_buffer *cmdbuf,
                   struct tu_query_pool *pool,
                   uint32_t query,
                   uint32_t stream_id)
{
   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;

   uint64_t end_iova = primitive_query_iova(pool, query, end[0], 0);
   uint64_t result_written_iova = query_result_iova(pool, query, 0);
   uint64_t result_generated_iova = query_result_iova(pool, query, 1);
   uint64_t begin_written_iova = primitive_query_iova(pool, query, begin[stream_id], 0);
   uint64_t begin_generated_iova = primitive_query_iova(pool, query, begin[stream_id], 1);
   uint64_t end_written_iova = primitive_query_iova(pool, query, end[stream_id], 0);
   uint64_t end_generated_iova = primitive_query_iova(pool, query, end[stream_id], 1);
   uint64_t available_iova = query_available_iova(pool, query);

   tu_cs_emit_regs(cs, A6XX_VPC_SO_STREAM_COUNTS_LO(end_iova));
   tu6_emit_event_write(cmdbuf, cs, WRITE_PRIMITIVE_COUNTS);

   tu_cs_emit_wfi(cs);
   tu6_emit_event_write(cmdbuf, cs, CACHE_FLUSH_TS);

   /* Set the count of written primitives */
   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
   tu_cs_emit(cs, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C |
                  CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES | 0x80000000);
   tu_cs_emit_qw(cs, result_written_iova);
   tu_cs_emit_qw(cs, result_written_iova);
   tu_cs_emit_qw(cs, end_written_iova);
   tu_cs_emit_qw(cs, begin_written_iova);

   tu6_emit_event_write(cmdbuf, cs, CACHE_FLUSH_TS);

   /* Set the count of generated primitives */
   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
   tu_cs_emit(cs, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C |
                  CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES | 0x80000000);
   tu_cs_emit_qw(cs, result_generated_iova);
   tu_cs_emit_qw(cs, result_generated_iova);
   tu_cs_emit_qw(cs, end_generated_iova);
   tu_cs_emit_qw(cs, begin_generated_iova);

   /* Set the availability to 1 */
   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
   tu_cs_emit_qw(cs, available_iova);
   tu_cs_emit_qw(cs, 0x1);
}

/* Implement this bit of spec text from section 17.2 "Query Operation":
 *
 *    If queries are used while executing a render pass instance that has
 *    multiview enabled, the query uses N consecutive query indices in the
 *    query pool (starting at query) where N is the number of bits set in
 *    the view mask in the subpass the query is used in. How the numerical
 *    results of the query are distributed among the queries is
 *    implementation-dependent. For example, some implementations may write
 *    each view's results to a distinct query, while other implementations
 *    may write the total result to the first query and write zero to the
 *    other queries. However, the sum of the results in all the queries must
 *    accurately reflect the total result of the query summed over all
 *    views. Applications can sum the results from all the queries to
 *    compute the total result.
 *
 * Since we execute all views at once, we write zero to the other queries.
 * Furthermore, because queries must be reset before use, and we set the
 * result to 0 in vkCmdResetQueryPool(), we just need to mark them as
 * available.
 */
static void
handle_multiview_queries(struct tu_cmd_buffer *cmd,
                         struct tu_query_pool *pool,
                         uint32_t query)
{
   if (!cmd->state.pass || !cmd->state.subpass->multiview_mask)
      return;

   unsigned views = util_bitcount(cmd->state.subpass->multiview_mask);
   struct tu_cs *cs = &cmd->draw_epilogue_cs;

   for (uint32_t i = 1; i < views; i++) {
      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
      tu_cs_emit_qw(cs, query_available_iova(pool, query + i));
      tu_cs_emit_qw(cs, 0x1);
   }
}

void
tu_CmdEndQuery(VkCommandBuffer commandBuffer,
               VkQueryPool queryPool,
               uint32_t query)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
   assert(query < pool->size);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_end_occlusion_query(cmdbuf, pool, query);
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      emit_end_xfb_query(cmdbuf, pool, query, 0);
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      emit_end_stat_query(cmdbuf, pool, query);
      break;
   case VK_QUERY_TYPE_TIMESTAMP:
      unreachable("Unimplemented query type");
   default:
      assert(!"Invalid query type");
   }

   handle_multiview_queries(cmdbuf, pool, query);

   tu_bo_list_add(&cmdbuf->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);
}

void
tu_CmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer,
                         VkQueryPool queryPool,
                         uint32_t query,
                         uint32_t index)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
   assert(query < pool->size);

   switch (pool->type) {
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      /* There are only 4 transform feedback streams, so the stream index
       * must stay in range of the per-stream begin/end arrays. */
      assert(index < 4);
      emit_end_xfb_query(cmdbuf, pool, query, index);
      break;
   default:
      assert(!"Invalid query type");
   }

   tu_bo_list_add(&cmdbuf->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);
}

void
tu_CmdWriteTimestamp(VkCommandBuffer commandBuffer,
                     VkPipelineStageFlagBits pipelineStage,
                     VkQueryPool queryPool,
                     uint32_t query)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);

   tu_bo_list_add(&cmd->bo_list, &pool->bo, MSM_SUBMIT_BO_WRITE);

   /* Inside a render pass, just write the timestamp multiple times so that
    * the user gets the last one if we use GMEM. There isn't really much
    * better we can do, and this seems to be what the blob does too.
    */
   struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;

   /* Stages that will already have been executed by the time the CP executes
    * the REG_TO_MEM. DrawIndirect parameters are read by the CP, so the draw
    * indirect stage counts as top-of-pipe too.
    */
   VkPipelineStageFlags top_of_pipe_flags =
      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;

   if (pipelineStage & ~top_of_pipe_flags) {
      /* Execute a WFI so that all commands complete. Note that CP_REG_TO_MEM
       * does CP_WAIT_FOR_ME internally, which will wait for the WFI to
       * complete.
       *
       * Stalling the CP like this is really unfortunate, but I don't think
       * there's a better solution that allows all 48 bits of precision
       * because CP_EVENT_WRITE doesn't support 64-bit timestamps.
       */
      tu_cs_emit_wfi(cs);
   }

   tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
   tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_ALWAYS_ON_COUNTER_LO) |
                  CP_REG_TO_MEM_0_CNT(2) |
                  CP_REG_TO_MEM_0_64B);
   tu_cs_emit_qw(cs, query_result_iova(pool, query, 0));

   /* Only flag availability once the entire renderpass is done, similar to
    * the begin/end path.
    */
   cs = cmd->state.pass ? &cmd->draw_epilogue_cs : &cmd->cs;

   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
   tu_cs_emit_qw(cs, query_available_iova(pool, query));
   tu_cs_emit_qw(cs, 0x1);

   /* From the spec for vkCmdWriteTimestamp:
    *
    *    If vkCmdWriteTimestamp is called while executing a render pass
    *    instance that has multiview enabled, the timestamp uses N
    *    consecutive query indices in the query pool (starting at query)
    *    where N is the number of bits set in the view mask of the subpass
    *    the command is executed in. The resulting query values are
    *    determined by an implementation-dependent choice of one of the
    *    following behaviors:
    *
    *    -   The first query is a timestamp value and (if more than one bit
    *        is set in the view mask) zero is written to the remaining
    *        queries. If two timestamps are written in the same subpass, the
    *        sum of the execution time of all views between those commands
    *        is the difference between the first query written by each
    *        command.
    *
    *    -   All N queries are timestamp values. If two timestamps are
    *        written in the same subpass, the sum of the execution time of
    *        all views between those commands is the sum of the difference
    *        between corresponding queries written by each command. The
    *        difference between corresponding queries may be the execution
    *        time of a single view.
    *
    * We execute all views in the same draw call, so we implement the first
    * option, the same as regular queries.
    */
   handle_multiview_queries(cmd, pool, query);
}