/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_query.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "util/u_suballoc.h"
#include "amd/common/sid.h"
#define SI_MAX_STREAMS 4

static const struct si_query_ops query_hw_ops;

struct si_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct si_query_sw {
	struct si_query b;

	uint64_t begin_result;
	uint64_t end_result;

	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};
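/* Software queries sample CPU-side counters (draw-call counts, winsys
 * statistics, thread utilization) at begin/end time; the hardware queries
 * further below write results into GPU buffers instead. The split mirrors
 * the two si_query_ops vtables defined in this file. */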
static void si_query_sw_destroy(struct si_screen *sscreen,
				struct si_query *squery)
{
	struct si_query_sw *query = (struct si_query_sw *)squery;

	sscreen->b.fence_reference(&sscreen->b, &query->fence, NULL);
	FREE(query);
}
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case SI_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case SI_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case SI_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case SI_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case SI_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case SI_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case SI_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case SI_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case SI_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
	case SI_QUERY_GFX_IB_SIZE: return RADEON_GFX_IB_SIZE_COUNTER;
	case SI_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case SI_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
	case SI_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case SI_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case SI_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case SI_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case SI_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case SI_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case SI_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}
static int64_t si_finish_dma_get_cpu_time(struct si_context *sctx)
{
	struct pipe_fence_handle *fence = NULL;

	si_flush_dma_cs(sctx, 0, &fence);
	if (fence) {
		sctx->ws->fence_wait(sctx->ws, fence, PIPE_TIMEOUT_INFINITE);
		sctx->ws->fence_reference(&fence, NULL);
	}

	return os_time_get_nano();
}
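/* A CPU timestamp taken right after an SDMA flush+wait brackets the GPU
 * work; SI_QUERY_TIME_ELAPSED_SDMA_SI uses this at begin and end to measure
 * elapsed time on chips without a usable SDMA timestamp packet. */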
static bool si_query_sw_begin(struct si_context *sctx,
			      struct si_query *squery)
{
	struct si_query_sw *query = (struct si_query_sw *)squery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case SI_QUERY_TIME_ELAPSED_SDMA_SI:
		query->begin_result = si_finish_dma_get_cpu_time(sctx);
		break;
	case SI_QUERY_DRAW_CALLS:
		query->begin_result = sctx->num_draw_calls;
		break;
	case SI_QUERY_DECOMPRESS_CALLS:
		query->begin_result = sctx->num_decompress_calls;
		break;
	case SI_QUERY_MRT_DRAW_CALLS:
		query->begin_result = sctx->num_mrt_draw_calls;
		break;
	case SI_QUERY_PRIM_RESTART_CALLS:
		query->begin_result = sctx->num_prim_restart_calls;
		break;
	case SI_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = sctx->num_spill_draw_calls;
		break;
	case SI_QUERY_COMPUTE_CALLS:
		query->begin_result = sctx->num_compute_calls;
		break;
	case SI_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = sctx->num_spill_compute_calls;
		break;
	case SI_QUERY_DMA_CALLS:
		query->begin_result = sctx->num_dma_calls;
		break;
	case SI_QUERY_CP_DMA_CALLS:
		query->begin_result = sctx->num_cp_dma_calls;
		break;
	case SI_QUERY_NUM_VS_FLUSHES:
		query->begin_result = sctx->num_vs_flushes;
		break;
	case SI_QUERY_NUM_PS_FLUSHES:
		query->begin_result = sctx->num_ps_flushes;
		break;
	case SI_QUERY_NUM_CS_FLUSHES:
		query->begin_result = sctx->num_cs_flushes;
		break;
	case SI_QUERY_NUM_CB_CACHE_FLUSHES:
		query->begin_result = sctx->num_cb_cache_flushes;
		break;
	case SI_QUERY_NUM_DB_CACHE_FLUSHES:
		query->begin_result = sctx->num_db_cache_flushes;
		break;
	case SI_QUERY_NUM_L2_INVALIDATES:
		query->begin_result = sctx->num_L2_invalidates;
		break;
	case SI_QUERY_NUM_L2_WRITEBACKS:
		query->begin_result = sctx->num_L2_writebacks;
		break;
	case SI_QUERY_NUM_RESIDENT_HANDLES:
		query->begin_result = sctx->num_resident_handles;
		break;
	case SI_QUERY_TC_OFFLOADED_SLOTS:
		query->begin_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
		break;
	case SI_QUERY_TC_DIRECT_SLOTS:
		query->begin_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
		break;
	case SI_QUERY_TC_NUM_SYNCS:
		query->begin_result = sctx->tc ? sctx->tc->num_syncs : 0;
		break;
	case SI_QUERY_REQUESTED_VRAM:
	case SI_QUERY_REQUESTED_GTT:
	case SI_QUERY_MAPPED_VRAM:
	case SI_QUERY_MAPPED_GTT:
	case SI_QUERY_VRAM_USAGE:
	case SI_QUERY_VRAM_VIS_USAGE:
	case SI_QUERY_GTT_USAGE:
	case SI_QUERY_GPU_TEMPERATURE:
	case SI_QUERY_CURRENT_GPU_SCLK:
	case SI_QUERY_CURRENT_GPU_MCLK:
	case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
	case SI_QUERY_NUM_MAPPED_BUFFERS:
		query->begin_result = 0;
		break;
	case SI_QUERY_BUFFER_WAIT_TIME:
	case SI_QUERY_GFX_IB_SIZE:
	case SI_QUERY_NUM_GFX_IBS:
	case SI_QUERY_NUM_SDMA_IBS:
	case SI_QUERY_NUM_BYTES_MOVED:
	case SI_QUERY_NUM_EVICTIONS:
	case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
		break;
	}
	case SI_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
		query->begin_time = sctx->ws->query_value(sctx->ws,
							  RADEON_NUM_GFX_IBS);
		break;
	case SI_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
		query->begin_time = os_time_get_nano();
		break;
	case SI_QUERY_GALLIUM_THREAD_BUSY:
		query->begin_result =
			sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
		query->begin_time = os_time_get_nano();
		break;
	case SI_QUERY_GPU_LOAD:
	case SI_QUERY_GPU_SHADERS_BUSY:
	case SI_QUERY_GPU_TA_BUSY:
	case SI_QUERY_GPU_GDS_BUSY:
	case SI_QUERY_GPU_VGT_BUSY:
	case SI_QUERY_GPU_IA_BUSY:
	case SI_QUERY_GPU_SX_BUSY:
	case SI_QUERY_GPU_WD_BUSY:
	case SI_QUERY_GPU_BCI_BUSY:
	case SI_QUERY_GPU_SC_BUSY:
	case SI_QUERY_GPU_PA_BUSY:
	case SI_QUERY_GPU_DB_BUSY:
	case SI_QUERY_GPU_CP_BUSY:
	case SI_QUERY_GPU_CB_BUSY:
	case SI_QUERY_GPU_SDMA_BUSY:
	case SI_QUERY_GPU_PFP_BUSY:
	case SI_QUERY_GPU_MEQ_BUSY:
	case SI_QUERY_GPU_ME_BUSY:
	case SI_QUERY_GPU_SURF_SYNC_BUSY:
	case SI_QUERY_GPU_CP_DMA_BUSY:
	case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->begin_result = si_begin_counter(sctx->screen,
						       query->b.type);
		break;
	case SI_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&sctx->screen->num_compilations);
		break;
	case SI_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&sctx->screen->num_shaders_created);
		break;
	case SI_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&sctx->screen->num_shader_cache_hits);
		break;
	case SI_QUERY_GPIN_ASIC_ID:
	case SI_QUERY_GPIN_NUM_SIMD:
	case SI_QUERY_GPIN_NUM_RB:
	case SI_QUERY_GPIN_NUM_SPI:
	case SI_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("si_query_sw_begin: bad query type");
	}

	return true;
}
static bool si_query_sw_end(struct si_context *sctx,
			    struct si_query *squery)
{
	struct si_query_sw *query = (struct si_query_sw *)squery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		sctx->b.flush(&sctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case SI_QUERY_TIME_ELAPSED_SDMA_SI:
		query->end_result = si_finish_dma_get_cpu_time(sctx);
		break;
	case SI_QUERY_DRAW_CALLS:
		query->end_result = sctx->num_draw_calls;
		break;
	case SI_QUERY_DECOMPRESS_CALLS:
		query->end_result = sctx->num_decompress_calls;
		break;
	case SI_QUERY_MRT_DRAW_CALLS:
		query->end_result = sctx->num_mrt_draw_calls;
		break;
	case SI_QUERY_PRIM_RESTART_CALLS:
		query->end_result = sctx->num_prim_restart_calls;
		break;
	case SI_QUERY_SPILL_DRAW_CALLS:
		query->end_result = sctx->num_spill_draw_calls;
		break;
	case SI_QUERY_COMPUTE_CALLS:
		query->end_result = sctx->num_compute_calls;
		break;
	case SI_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = sctx->num_spill_compute_calls;
		break;
	case SI_QUERY_DMA_CALLS:
		query->end_result = sctx->num_dma_calls;
		break;
	case SI_QUERY_CP_DMA_CALLS:
		query->end_result = sctx->num_cp_dma_calls;
		break;
	case SI_QUERY_NUM_VS_FLUSHES:
		query->end_result = sctx->num_vs_flushes;
		break;
	case SI_QUERY_NUM_PS_FLUSHES:
		query->end_result = sctx->num_ps_flushes;
		break;
	case SI_QUERY_NUM_CS_FLUSHES:
		query->end_result = sctx->num_cs_flushes;
		break;
	case SI_QUERY_NUM_CB_CACHE_FLUSHES:
		query->end_result = sctx->num_cb_cache_flushes;
		break;
	case SI_QUERY_NUM_DB_CACHE_FLUSHES:
		query->end_result = sctx->num_db_cache_flushes;
		break;
	case SI_QUERY_NUM_L2_INVALIDATES:
		query->end_result = sctx->num_L2_invalidates;
		break;
	case SI_QUERY_NUM_L2_WRITEBACKS:
		query->end_result = sctx->num_L2_writebacks;
		break;
	case SI_QUERY_NUM_RESIDENT_HANDLES:
		query->end_result = sctx->num_resident_handles;
		break;
	case SI_QUERY_TC_OFFLOADED_SLOTS:
		query->end_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
		break;
	case SI_QUERY_TC_DIRECT_SLOTS:
		query->end_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
		break;
	case SI_QUERY_TC_NUM_SYNCS:
		query->end_result = sctx->tc ? sctx->tc->num_syncs : 0;
		break;
	case SI_QUERY_REQUESTED_VRAM:
	case SI_QUERY_REQUESTED_GTT:
	case SI_QUERY_MAPPED_VRAM:
	case SI_QUERY_MAPPED_GTT:
	case SI_QUERY_VRAM_USAGE:
	case SI_QUERY_VRAM_VIS_USAGE:
	case SI_QUERY_GTT_USAGE:
	case SI_QUERY_GPU_TEMPERATURE:
	case SI_QUERY_CURRENT_GPU_SCLK:
	case SI_QUERY_CURRENT_GPU_MCLK:
	case SI_QUERY_BUFFER_WAIT_TIME:
	case SI_QUERY_GFX_IB_SIZE:
	case SI_QUERY_NUM_MAPPED_BUFFERS:
	case SI_QUERY_NUM_GFX_IBS:
	case SI_QUERY_NUM_SDMA_IBS:
	case SI_QUERY_NUM_BYTES_MOVED:
	case SI_QUERY_NUM_EVICTIONS:
	case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
		break;
	}
	case SI_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
		query->end_time = sctx->ws->query_value(sctx->ws,
							RADEON_NUM_GFX_IBS);
		break;
	case SI_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case SI_QUERY_GALLIUM_THREAD_BUSY:
		query->end_result =
			sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
		query->end_time = os_time_get_nano();
		break;
	case SI_QUERY_GPU_LOAD:
	case SI_QUERY_GPU_SHADERS_BUSY:
	case SI_QUERY_GPU_TA_BUSY:
	case SI_QUERY_GPU_GDS_BUSY:
	case SI_QUERY_GPU_VGT_BUSY:
	case SI_QUERY_GPU_IA_BUSY:
	case SI_QUERY_GPU_SX_BUSY:
	case SI_QUERY_GPU_WD_BUSY:
	case SI_QUERY_GPU_BCI_BUSY:
	case SI_QUERY_GPU_SC_BUSY:
	case SI_QUERY_GPU_PA_BUSY:
	case SI_QUERY_GPU_DB_BUSY:
	case SI_QUERY_GPU_CP_BUSY:
	case SI_QUERY_GPU_CB_BUSY:
	case SI_QUERY_GPU_SDMA_BUSY:
	case SI_QUERY_GPU_PFP_BUSY:
	case SI_QUERY_GPU_MEQ_BUSY:
	case SI_QUERY_GPU_ME_BUSY:
	case SI_QUERY_GPU_SURF_SYNC_BUSY:
	case SI_QUERY_GPU_CP_DMA_BUSY:
	case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->end_result = si_end_counter(sctx->screen,
						   query->b.type,
						   query->begin_result);
		query->begin_result = 0;
		break;
	case SI_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&sctx->screen->num_compilations);
		break;
	case SI_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&sctx->screen->num_shaders_created);
		break;
	case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = sctx->last_tex_ps_draw_ratio;
		break;
	case SI_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&sctx->screen->num_shader_cache_hits);
		break;
	case SI_QUERY_GPIN_ASIC_ID:
	case SI_QUERY_GPIN_NUM_SIMD:
	case SI_QUERY_GPIN_NUM_RB:
	case SI_QUERY_GPIN_NUM_SPI:
	case SI_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("si_query_sw_end: bad query type");
	}

	return true;
}
static bool si_query_sw_get_result(struct si_context *sctx,
				   struct si_query *squery,
				   bool wait,
				   union pipe_query_result *result)
{
	struct si_query_sw *query = (struct si_query_sw *)squery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)sctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = sctx->b.screen;
		struct pipe_context *ctx = squery->b.flushed ? NULL : &sctx->b;

		result->b = screen->fence_finish(screen, ctx, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case SI_QUERY_GFX_BO_LIST_SIZE:
		result->u64 = (query->end_result - query->begin_result) /
			      (query->end_time - query->begin_time);
		return true;
	case SI_QUERY_CS_THREAD_BUSY:
	case SI_QUERY_GALLIUM_THREAD_BUSY:
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case SI_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case SI_QUERY_GPIN_NUM_SIMD:
		result->u32 = sctx->screen->info.num_good_compute_units;
		return true;
	case SI_QUERY_GPIN_NUM_RB:
		result->u32 = sctx->screen->info.num_render_backends;
		return true;
	case SI_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case SI_QUERY_GPIN_NUM_SE:
		result->u32 = sctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case SI_QUERY_BUFFER_WAIT_TIME:
	case SI_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case SI_QUERY_CURRENT_GPU_SCLK:
	case SI_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}
static const struct si_query_ops sw_query_ops = {
	.destroy = si_query_sw_destroy,
	.begin = si_query_sw_begin,
	.end = si_query_sw_end,
	.get_result = si_query_sw_get_result,
	.get_result_resource = NULL
};
static struct pipe_query *si_query_sw_create(unsigned query_type)
{
	struct si_query_sw *query;

	query = CALLOC_STRUCT(si_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}
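/* Hardware queries append fixed-size result slots into a chain of GPU
 * buffers (struct si_query_buffer); each suspend/resume writes a new slot,
 * and readback walks the whole chain. */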
void si_query_buffer_destroy(struct si_screen *sscreen, struct si_query_buffer *buffer)
{
	struct si_query_buffer *prev = buffer->previous;

	/* Release all query buffers. */
	while (prev) {
		struct si_query_buffer *qbuf = prev;
		prev = prev->previous;
		si_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	si_resource_reference(&buffer->buf, NULL);
}
void si_query_buffer_reset(struct si_context *sctx, struct si_query_buffer *buffer)
{
	/* Discard all query buffers except for the oldest. */
	while (buffer->previous) {
		struct si_query_buffer *qbuf = buffer->previous;
		buffer->previous = qbuf->previous;

		si_resource_reference(&buffer->buf, NULL);
		buffer->buf = qbuf->buf; /* move ownership */
		FREE(qbuf);
	}

	buffer->results_end = 0;

	if (!buffer->buf)
		return;

	/* Discard even the oldest buffer if it can't be mapped without a stall. */
	if (si_rings_is_buffer_referenced(sctx, buffer->buf->buf, RADEON_USAGE_READWRITE) ||
	    !sctx->ws->buffer_wait(buffer->buf->buf, 0, RADEON_USAGE_READWRITE)) {
		si_resource_reference(&buffer->buf, NULL);
	} else {
		buffer->unprepared = true;
	}
}
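/* Resetting keeps the oldest buffer for reuse only when it is idle; the
 * "unprepared" flag defers re-clearing it until the next allocation in
 * si_query_buffer_alloc below. */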
bool si_query_buffer_alloc(struct si_context *sctx, struct si_query_buffer *buffer,
			   bool (*prepare_buffer)(struct si_context *, struct si_query_buffer *),
			   unsigned size)
{
	bool unprepared = buffer->unprepared;
	buffer->unprepared = false;

	if (!buffer->buf || buffer->results_end + size > buffer->buf->b.b.width0) {
		if (buffer->buf) {
			struct si_query_buffer *qbuf = MALLOC_STRUCT(si_query_buffer);
			memcpy(qbuf, buffer, sizeof(*qbuf));
			buffer->previous = qbuf;
		}
		buffer->results_end = 0;

		/* Queries are normally read by the CPU after
		 * being written by the GPU, hence staging is probably a good
		 * usage pattern.
		 */
		struct si_screen *screen = sctx->screen;
		unsigned buf_size = MAX2(size, screen->info.min_alloc_size);
		buffer->buf = si_resource(
			pipe_buffer_create(&screen->b, 0, PIPE_USAGE_STAGING, buf_size));
		if (unlikely(!buffer->buf))
			return false;

		unprepared = true;
	}

	if (unprepared && prepare_buffer) {
		if (unlikely(!prepare_buffer(sctx, buffer))) {
			si_resource_reference(&buffer->buf, NULL);
			return false;
		}
	}

	return true;
}
void si_query_hw_destroy(struct si_screen *sscreen,
			 struct si_query *squery)
{
	struct si_query_hw *query = (struct si_query_hw *)squery;

	si_query_buffer_destroy(sscreen, &query->buffer);
	si_resource_reference(&query->workaround_buf, NULL);
	FREE(squery);
}
static bool si_query_hw_prepare_buffer(struct si_context *sctx,
				       struct si_query_buffer *qbuf)
{
	static const struct si_query_hw si_query_hw_s;
	struct si_query_hw *query = container_of(qbuf, &si_query_hw_s, buffer);
	struct si_screen *screen = sctx->screen;

	/* The caller ensures that the buffer is currently unused by the GPU. */
	uint32_t *results = screen->ws->buffer_map(qbuf->buf->buf, NULL,
						   PIPE_TRANSFER_WRITE |
						   PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, qbuf->buf->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		unsigned max_rbs = screen->info.num_render_backends;
		unsigned enabled_rb_mask = screen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = qbuf->buf->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * max_rbs;
		}
	}

	return true;
}
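/* Occlusion results are summed per render backend; pre-setting the "result
 * written" top bits for backends that never write lets si_query_read_result
 * treat disabled RBs as already finished. */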
static void si_query_hw_get_result_resource(struct si_context *sctx,
					    struct si_query *squery,
					    bool wait,
					    enum pipe_query_value_type result_type,
					    int index,
					    struct pipe_resource *resource,
					    unsigned offset);

static void si_query_hw_do_emit_start(struct si_context *sctx,
				      struct si_query_hw *query,
				      struct si_resource *buffer,
				      uint64_t va);
static void si_query_hw_do_emit_stop(struct si_context *sctx,
				     struct si_query_hw *query,
				     struct si_resource *buffer,
				     uint64_t va);
static void si_query_hw_add_result(struct si_screen *sscreen,
				   struct si_query_hw *, void *buffer,
				   union pipe_query_result *result);
static void si_query_hw_clear_result(struct si_query_hw *,
				     union pipe_query_result *);
static struct si_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = si_query_hw_prepare_buffer,
	.emit_start = si_query_hw_do_emit_start,
	.emit_stop = si_query_hw_do_emit_stop,
	.clear_result = si_query_hw_clear_result,
	.add_result = si_query_hw_add_result,
};
static struct pipe_query *si_query_hw_create(struct si_screen *sscreen,
					     unsigned query_type,
					     unsigned index)
{
	struct si_query_hw *query = CALLOC_STRUCT(si_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		query->result_size = 16 * sscreen->info.num_render_backends;
		query->result_size += 16; /* for the fence + alignment */
		query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
		break;
	case SI_QUERY_TIME_ELAPSED_SDMA:
		/* GET_GLOBAL_TIMESTAMP only works if the offset is a multiple of 32. */
		query->result_size = 64;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->b.num_cs_dw_suspend = 8 + si_cp_write_fence_dwords(sscreen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->b.num_cs_dw_suspend = 8 + si_cp_write_fence_dwords(sscreen);
		query->flags = SI_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->b.num_cs_dw_suspend = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32 * SI_MAX_STREAMS;
		query->b.num_cs_dw_suspend = 6 * SI_MAX_STREAMS;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on GCN. */
		query->result_size = 11 * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}
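/* result_size above is the per-slot footprint in the query buffer: begin/end
 * pairs per render backend for occlusion, per stream for streamout, plus a
 * trailing fence dword where the CP signals result readiness. */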
static void si_update_occlusion_query_state(struct si_context *sctx,
					    unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		bool old_enable = sctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			sctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		sctx->num_occlusion_queries += diff;
		assert(sctx->num_occlusion_queries >= 0);

		if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
			sctx->num_perfect_occlusion_queries += diff;
			assert(sctx->num_perfect_occlusion_queries >= 0);
		}

		enable = sctx->num_occlusion_queries != 0;
		perfect_enable = sctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			si_set_occlusion_query_state(sctx, old_perfect_enable);
		}
	}
}
static unsigned event_type_for_stream(unsigned stream)
{
	switch (stream) {
	default:
	case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
	case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
	case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
	case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
	}
}
static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
				  unsigned stream)
{
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
	radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
}
static void si_query_hw_do_emit_start(struct si_context *sctx,
				      struct si_query_hw *query,
				      struct si_resource *buffer,
				      uint64_t va)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;

	switch (query->b.type) {
	case SI_QUERY_TIME_ELAPSED_SDMA:
		si_dma_emit_timestamp(sctx, buffer, va - buffer->gpu_address);
		return;
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0,
				  EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
				  EOP_DATA_SEL_TIMESTAMP, NULL, va,
				  0, query->b.type);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		assert(0);
	}
	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
				  RADEON_PRIO_QUERY);
}
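/* ZPASS_DONE and SAMPLE_* events make the hardware dump its counters
 * directly to "va"; only time-based queries go through si_cp_release_mem,
 * which writes an end-of-pipe timestamp instead. */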
static void si_query_hw_emit_start(struct si_context *sctx,
				   struct si_query_hw *query)
{
	uint64_t va;

	if (!si_query_buffer_alloc(sctx, &query->buffer, query->ops->prepare_buffer,
				   query->result_size))
		return;

	si_update_occlusion_query_state(sctx, query->b.type, 1);
	si_update_prims_generated_query_state(sctx, query->b.type, 1);

	if (query->b.type == PIPE_QUERY_PIPELINE_STATISTICS)
		sctx->num_pipeline_stat_queries++;

	if (query->b.type != SI_QUERY_TIME_ELAPSED_SDMA)
		si_need_gfx_cs_space(sctx);

	va = query->buffer.buf->gpu_address + query->buffer.results_end;
	query->ops->emit_start(sctx, query, query->buffer.buf, va);
}
static void si_query_hw_do_emit_stop(struct si_context *sctx,
				     struct si_query_hw *query,
				     struct si_resource *buffer,
				     uint64_t va)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case SI_QUERY_TIME_ELAPSED_SDMA:
		si_dma_emit_timestamp(sctx, buffer, va + 32 - buffer->gpu_address);
		return;
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + sctx->screen->info.num_render_backends * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += 16;
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		va += 16;
		for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0,
				  EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
				  EOP_DATA_SEL_TIMESTAMP, NULL, va,
				  0, query->b.type);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
				  RADEON_PRIO_QUERY);

	if (fence_va) {
		si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0,
				  EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
				  EOP_DATA_SEL_VALUE_32BIT,
				  query->buffer.buf, fence_va, 0x80000000,
				  query->b.type);
	}
}
static void si_query_hw_emit_stop(struct si_context *sctx,
				  struct si_query_hw *query)
{
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
		si_need_gfx_cs_space(sctx);
		if (!si_query_buffer_alloc(sctx, &query->buffer, query->ops->prepare_buffer,
					   query->result_size))
			return;
	}

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(sctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	si_update_occlusion_query_state(sctx, query->b.type, -1);
	si_update_prims_generated_query_state(sctx, query->b.type, -1);

	if (query->b.type == PIPE_QUERY_PIPELINE_STATISTICS)
		sctx->num_pipeline_stat_queries--;
}
static void emit_set_predicate(struct si_context *ctx,
			       struct si_resource *buf, uint64_t va,
			       uint32_t op)
{
	struct radeon_cmdbuf *cs = ctx->gfx_cs;

	if (ctx->chip_class >= GFX9) {
		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
	} else {
		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
		radeon_emit(cs, va);
		radeon_emit(cs, op | ((va >> 32) & 0xFF));
	}
	radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
				  RADEON_PRIO_QUERY);
}
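/* GFX9 moved the predication operation into its own PM4 dword; older chips
 * pack the op together with the high address bits into one dword, hence the
 * two encodings above. */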
static void si_emit_query_predication(struct si_context *ctx)
{
	struct si_query_hw *query = (struct si_query_hw *)ctx->render_cond;
	struct si_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait, invert;

	if (!query)
		return;

	invert = ctx->render_cond_invert;
	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	if (query->workaround_buf) {
		op = PRED_OP(PREDICATION_OP_BOOL64);
	} else {
		switch (query->b.type) {
		case PIPE_QUERY_OCCLUSION_COUNTER:
		case PIPE_QUERY_OCCLUSION_PREDICATE:
		case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
			op = PRED_OP(PREDICATION_OP_ZPASS);
			break;
		case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
			op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
			invert = !invert;
			break;
		default:
			assert(false);
			return;
		}
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

	/* Use the value written by compute shader as a workaround. Note that
	 * the wait flag does not apply in this predication mode.
	 *
	 * The shader outputs the result value to L2. Workarounds only affect GFX8
	 * and later, where the CP reads data from L2, so we don't need an
	 * additional flush.
	 */
	if (query->workaround_buf) {
		uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
		emit_set_predicate(ctx, query->workaround_buf, va, op);
		return;
	}

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va_base = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			uint64_t va = va_base + results_base;

			if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
				for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
					emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

					/* set CONTINUE bit for all packets except the first */
					op |= PREDICATION_CONTINUE;
				}
			} else {
				emit_set_predicate(ctx, qbuf->buf, va, op);
				op |= PREDICATION_CONTINUE;
			}

			results_base += query->result_size;
		}
	}
}
static struct pipe_query *si_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct si_screen *sscreen =
		(struct si_screen *)ctx->screen;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    (query_type >= PIPE_QUERY_DRIVER_SPECIFIC &&
	     query_type != SI_QUERY_TIME_ELAPSED_SDMA))
		return si_query_sw_create(query_type);

	return si_query_hw_create(sscreen, query_type, index);
}
static void si_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_query *squery = (struct si_query *)query;

	squery->ops->destroy(sctx->screen, squery);
}
static boolean si_begin_query(struct pipe_context *ctx,
			      struct pipe_query *query)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_query *squery = (struct si_query *)query;

	return squery->ops->begin(sctx, squery);
}
bool si_query_hw_begin(struct si_context *sctx,
		       struct si_query *squery)
{
	struct si_query_hw *query = (struct si_query_hw *)squery;

	if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & SI_QUERY_HW_FLAG_BEGIN_RESUMES))
		si_query_buffer_reset(sctx, &query->buffer);

	si_resource_reference(&query->workaround_buf, NULL);

	si_query_hw_emit_start(sctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->b.active_list, &sctx->active_queries);
	sctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;
	return true;
}
static bool si_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_query *squery = (struct si_query *)query;

	return squery->ops->end(sctx, squery);
}
bool si_query_hw_end(struct si_context *sctx,
		     struct si_query *squery)
{
	struct si_query_hw *query = (struct si_query_hw *)squery;

	if (query->flags & SI_QUERY_HW_FLAG_NO_START)
		si_query_buffer_reset(sctx, &query->buffer);

	si_query_hw_emit_stop(sctx, query);

	if (!(query->flags & SI_QUERY_HW_FLAG_NO_START)) {
		LIST_DELINIT(&query->b.active_list);
		sctx->num_cs_dw_queries_suspend -= query->b.num_cs_dw_suspend;
	}

	if (!query->buffer.buf)
		return false;

	return true;
}
static void si_get_hw_query_params(struct si_context *sctx,
				   struct si_query_hw *squery, int index,
				   struct si_hw_query_params *params)
{
	unsigned max_rbs = sctx->screen->info.num_render_backends;

	params->pair_stride = 0;
	params->pair_count = 1;

	switch (squery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = max_rbs * 16;
		params->pair_stride = 16;
		params->pair_count = max_rbs;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		params->pair_count = SI_MAX_STREAMS;
		params->pair_stride = 32;
		/* fall through */
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 16;

		/* We can re-use the high dword of the last 64-bit value as a
		 * fence: it is initialized as 0, and the high bit is set by
		 * the write of the streamout stats event.
		 */
		params->fence_offset = squery->result_size - 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("si_get_hw_query_params unsupported");
	}
}
static unsigned si_query_read_result(void *map, unsigned start_index, unsigned end_index,
				     bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
static void si_query_hw_add_result(struct si_screen *sscreen,
				   struct si_query_hw *query,
				   void *buffer,
				   union pipe_query_result *result)
{
	unsigned max_rbs = sscreen->info.num_render_backends;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				si_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				si_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += si_query_read_result(buffer, 0, 2, false);
		break;
	case SI_QUERY_TIME_ELAPSED_SDMA:
		result->u64 += si_query_read_result(buffer, 0, 32/4, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += si_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += si_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			si_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			si_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			si_query_read_result(buffer, 2, 6, true) !=
			si_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
			result->b = result->b ||
				si_query_read_result(buffer, 2, 6, true) !=
				si_query_read_result(buffer, 0, 4, true);
			buffer = (char *)buffer + 32;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		result->pipeline_statistics.ps_invocations +=
			si_query_read_result(buffer, 0, 22, false);
		result->pipeline_statistics.c_primitives +=
			si_query_read_result(buffer, 2, 24, false);
		result->pipeline_statistics.c_invocations +=
			si_query_read_result(buffer, 4, 26, false);
		result->pipeline_statistics.vs_invocations +=
			si_query_read_result(buffer, 6, 28, false);
		result->pipeline_statistics.gs_invocations +=
			si_query_read_result(buffer, 8, 30, false);
		result->pipeline_statistics.gs_primitives +=
			si_query_read_result(buffer, 10, 32, false);
		result->pipeline_statistics.ia_primitives +=
			si_query_read_result(buffer, 12, 34, false);
		result->pipeline_statistics.ia_vertices +=
			si_query_read_result(buffer, 14, 36, false);
		result->pipeline_statistics.hs_invocations +=
			si_query_read_result(buffer, 16, 38, false);
		result->pipeline_statistics.ds_invocations +=
			si_query_read_result(buffer, 18, 40, false);
		result->pipeline_statistics.cs_invocations +=
			si_query_read_result(buffer, 20, 42, false);
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}
void si_query_hw_suspend(struct si_context *sctx, struct si_query *query)
{
	si_query_hw_emit_stop(sctx, (struct si_query_hw *)query);
}

void si_query_hw_resume(struct si_context *sctx, struct si_query *query)
{
	si_query_hw_emit_start(sctx, (struct si_query_hw *)query);
}
static const struct si_query_ops query_hw_ops = {
	.destroy = si_query_hw_destroy,
	.begin = si_query_hw_begin,
	.end = si_query_hw_end,
	.get_result = si_query_hw_get_result,
	.get_result_resource = si_query_hw_get_result_resource,

	.suspend = si_query_hw_suspend,
	.resume = si_query_hw_resume,
};
static boolean si_get_query_result(struct pipe_context *ctx,
				   struct pipe_query *query, boolean wait,
				   union pipe_query_result *result)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_query *squery = (struct si_query *)query;

	return squery->ops->get_result(sctx, squery, wait, result);
}
static void si_get_query_result_resource(struct pipe_context *ctx,
					 struct pipe_query *query,
					 boolean wait,
					 enum pipe_query_value_type result_type,
					 int index,
					 struct pipe_resource *resource,
					 unsigned offset)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_query *squery = (struct si_query *)query;

	squery->ops->get_result_resource(sctx, squery, wait, result_type, index,
					 resource, offset);
}
static void si_query_hw_clear_result(struct si_query_hw *query,
				     union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}
bool si_query_hw_get_result(struct si_context *sctx,
			    struct si_query *squery,
			    bool wait, union pipe_query_result *result)
{
	struct si_screen *sscreen = sctx->screen;
	struct si_query_hw *query = (struct si_query_hw *)squery;
	struct si_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned usage = PIPE_TRANSFER_READ |
				 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
		unsigned results_base = 0;
		void *map;

		if (squery->b.flushed)
			map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
		else
			map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);

		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(sscreen, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (squery->type == PIPE_QUERY_TIME_ELAPSED ||
	    squery->type == SI_QUERY_TIME_ELAPSED_SDMA ||
	    squery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / sscreen->info.clock_crystal_freq;
	}
	return true;
}
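/* clock_crystal_freq is in cycles per millisecond (kHz), so
 * (1000000 * ticks) / freq yields nanoseconds, the unit pipe_query expects
 * for timestamps. */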
static void si_restore_qbo_state(struct si_context *sctx,
				 struct si_qbo_state *st)
{
	sctx->b.bind_compute_state(&sctx->b, st->saved_compute);

	sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo,
				   st->saved_ssbo_writable_mask);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
static void si_query_hw_get_result_resource(struct si_context *sctx,
					    struct si_query *squery,
					    bool wait,
					    enum pipe_query_value_type result_type,
					    int index,
					    struct pipe_resource *resource,
					    unsigned offset)
{
	struct si_query_hw *query = (struct si_query_hw *)squery;
	struct si_query_buffer *qbuf;
	struct si_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct si_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct si_hw_query_params params;
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
	} consts;

	if (!sctx->query_result_shader) {
		sctx->query_result_shader = si_create_query_result_cs(sctx);
		if (!sctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(sctx->allocator_zeroed_memory, 16, 16,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}

	si_save_qbo_state(sctx, &saved_state);

	si_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	sctx->b.bind_compute_state(&sctx->b, sctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
		 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
		consts.config |= 8 | 256;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	sctx->flags |= sctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset;
		ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset;
			ssbo[2].buffer_size = 8;

			si_resource(resource)->TC_L2_dirty = true;
		}

		sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo,
					   1 << 2);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			si_cp_wait_mem(sctx, sctx->gfx_cs, va, 0x80000000,
				       0x80000000, WAIT_REG_MEM_EQUAL);
		}

		sctx->b.launch_grid(&sctx->b, &grid);
		sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
	}

	si_restore_qbo_state(sctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}
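/* Result readback on the GPU: a small compute shader (from
 * si_create_query_result_cs) walks each buffer in the chain, accumulates the
 * result slots, and writes the converted value to the destination buffer;
 * the consts.config bits tell it the query flavor and output width. */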
static void si_render_condition(struct pipe_context *ctx,
				struct pipe_query *query,
				boolean condition,
				enum pipe_render_cond_flag mode)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_query_hw *squery = (struct si_query_hw *)query;
	struct si_atom *atom = &sctx->atoms.s.render_cond;

	if (query) {
		bool needs_workaround = false;

		/* There was a firmware regression in GFX8 which causes successive
		 * SET_PREDICATION packets to give the wrong answer for
		 * non-inverted stream overflow predication.
		 */
		if (((sctx->chip_class == GFX8 && sctx->screen->info.pfp_fw_feature < 49) ||
		     (sctx->chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
		    !condition &&
		    (squery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
		     (squery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
		      (squery->buffer.previous ||
		       squery->buffer.results_end > squery->result_size)))) {
			needs_workaround = true;
		}

		if (needs_workaround && !squery->workaround_buf) {
			bool old_force_off = sctx->render_cond_force_off;
			sctx->render_cond_force_off = true;

			u_suballocator_alloc(
				sctx->allocator_zeroed_memory, 8, 8,
				&squery->workaround_offset,
				(struct pipe_resource **)&squery->workaround_buf);

			/* Reset to NULL to avoid a redundant SET_PREDICATION
			 * from launching the compute grid.
			 */
			sctx->render_cond = NULL;

			ctx->get_query_result_resource(
				ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
				&squery->workaround_buf->b.b, squery->workaround_offset);

			/* Setting this in the render cond atom is too late,
			 * so set it here. */
			sctx->flags |= sctx->screen->barrier_flags.L2_to_cp |
				       SI_CONTEXT_FLUSH_FOR_RENDER_COND;

			sctx->render_cond_force_off = old_force_off;
		}
	}

	sctx->render_cond = query;
	sctx->render_cond_invert = condition;
	sctx->render_cond_mode = mode;

	si_set_atom_dirty(sctx, atom, query != NULL);
}
void si_suspend_queries(struct si_context *sctx)
{
	struct si_query *query;

	LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, active_list)
		query->ops->suspend(sctx, query);
}

void si_resume_queries(struct si_context *sctx)
{
	struct si_query *query;

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	si_need_gfx_cs_space(sctx);

	LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, active_list)
		query->ops->resume(sctx, query);
}
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = SI_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, SI_QUERY_GROUP_##group_)
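/* For example,
 *   X("draw-calls", DRAW_CALLS, UINT64, AVERAGE)
 * expands to a pipe_driver_query_info initializer with
 * .query_type = SI_QUERY_DRAW_CALLS and no group (~0). */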
static struct pipe_driver_query_info si_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
	X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
	X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
	X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
	X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
	X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
	X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
	X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
	X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
	X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
	X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
	X("GFX-IB-size", GFX_IB_SIZE, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use it as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
	X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
	X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
	X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
	X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
	X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
	X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
	X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
	X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
	X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
	X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
	X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
	X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),

	/* SRBM_STATUS2 */
	X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),

	/* CP_STAT */
	X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
	X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
	X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
	X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
	X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
	X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL
static unsigned si_get_num_queries(struct si_screen *sscreen)
{
	/* amdgpu */
	if (sscreen->info.drm_major == 3) {
		if (sscreen->info.chip_class >= GFX8)
			return ARRAY_SIZE(si_driver_query_list);
		else
			return ARRAY_SIZE(si_driver_query_list) - 7;
	}

	/* radeon */
	if (sscreen->info.has_read_registers_query) {
		if (sscreen->info.chip_class == GFX7)
			return ARRAY_SIZE(si_driver_query_list) - 6;
		else
			return ARRAY_SIZE(si_driver_query_list) - 7;
	}

	return ARRAY_SIZE(si_driver_query_list) - 21;
}
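/* The subtractions drop trailing entries of si_driver_query_list that the
 * kernel interface cannot provide: -21 removes all GPU busy counters,
 * -7 removes the SRBM_STATUS2 and CP_STAT registers, and -6 removes only
 * the CP_STAT group. */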
static int si_get_driver_query_info(struct pipe_screen *screen,
				    unsigned index,
				    struct pipe_driver_query_info *info)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	unsigned num_queries = si_get_num_queries(sscreen);

	if (!info) {
		unsigned num_perfcounters =
			si_get_perfcounter_info(sscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return si_get_perfcounter_info(sscreen, index - num_queries, info);

	*info = si_driver_query_list[index];

	switch (info->query_type) {
	case SI_QUERY_REQUESTED_VRAM:
	case SI_QUERY_VRAM_USAGE:
	case SI_QUERY_MAPPED_VRAM:
		info->max_value.u64 = sscreen->info.vram_size;
		break;
	case SI_QUERY_REQUESTED_GTT:
	case SI_QUERY_GTT_USAGE:
	case SI_QUERY_MAPPED_GTT:
		info->max_value.u64 = sscreen->info.gart_size;
		break;
	case SI_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case SI_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = sscreen->info.vram_vis_size;
		break;
	}

	if (info->group_id != ~(unsigned)0 && sscreen->perfcounters)
		info->group_id += sscreen->perfcounters->num_groups;

	return 1;
}
/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int si_get_driver_query_group_info(struct pipe_screen *screen,
					  unsigned index,
					  struct pipe_driver_query_group_info *info)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	unsigned num_pc_groups = 0;

	if (sscreen->perfcounters)
		num_pc_groups = sscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + SI_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return si_get_perfcounter_group_info(sscreen, index, info);

	index -= num_pc_groups;
	if (index >= SI_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}
void si_init_query_functions(struct si_context *sctx)
{
	sctx->b.create_query = si_create_query;
	sctx->b.create_batch_query = si_create_batch_query;
	sctx->b.destroy_query = si_destroy_query;
	sctx->b.begin_query = si_begin_query;
	sctx->b.end_query = si_end_query;
	sctx->b.get_query_result = si_get_query_result;
	sctx->b.get_query_result_resource = si_get_query_result_resource;
	sctx->atoms.s.render_cond.emit = si_emit_query_predication;

	if (((struct si_screen*)sctx->b.screen)->info.num_render_backends > 0)
		sctx->b.render_condition = si_render_condition;

	LIST_INITHEAD(&sctx->active_queries);
}

void si_init_screen_query_functions(struct si_screen *sscreen)
{
	sscreen->b.get_driver_query_info = si_get_driver_query_info;
	sscreen->b.get_driver_query_group_info = si_get_driver_query_group_info;
}