/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "util/u_suballoc.h"
#include "amd/common/sid.h"

static const struct si_query_ops query_hw_ops;

struct si_hw_query_params {
   unsigned start_offset;
   unsigned end_offset;
   unsigned fence_offset;
   unsigned pair_stride;
   unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct si_query_sw {
   struct si_query b;

   uint64_t begin_result;
   uint64_t end_result;

   uint64_t begin_time;
   uint64_t end_time;

   /* Fence for GPU_FINISHED. */
   struct pipe_fence_handle *fence;
};

static void si_query_sw_destroy(struct si_context *sctx,
                                struct si_query *squery)
{
   struct si_query_sw *query = (struct si_query_sw *)squery;

   sctx->b.screen->fence_reference(sctx->b.screen, &query->fence, NULL);
   FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
   switch (type) {
   case SI_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
   case SI_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
   case SI_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
   case SI_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
   case SI_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
   case SI_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
   case SI_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
   case SI_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
   case SI_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
   case SI_QUERY_GFX_IB_SIZE: return RADEON_GFX_IB_SIZE_COUNTER;
   case SI_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
   case SI_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
   case SI_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
   case SI_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
   case SI_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
   case SI_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
   case SI_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
   case SI_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
   case SI_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
   default: unreachable("query type does not correspond to winsys id");
   }
}
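
/* Illustration only: the mapping above is a pure table lookup, e.g.
 * winsys_id_from_type(SI_QUERY_VRAM_USAGE) == RADEON_VRAM_USAGE, and the
 * returned id is what sctx->ws->query_value(sctx->ws, id) samples in the
 * software-query begin/end callbacks below.
 */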

static int64_t si_finish_dma_get_cpu_time(struct si_context *sctx)
{
   struct pipe_fence_handle *fence = NULL;

   si_flush_dma_cs(sctx, 0, &fence);
   if (fence) {
      sctx->ws->fence_wait(sctx->ws, fence, PIPE_TIMEOUT_INFINITE);
      sctx->ws->fence_reference(&fence, NULL);
   }

   return os_time_get_nano();
}
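
/* Usage sketch (illustration only): SI_QUERY_TIME_ELAPSED_SDMA_SI invokes the
 * helper above from both si_query_sw_begin() and si_query_sw_end(), so the
 * reported interval is
 *
 *    end_result - begin_result
 *    = cpu_time(after draining SDMA at end) - cpu_time(after draining at begin)
 *
 * i.e. on SI the SDMA elapsed time is measured with the CPU clock by flushing
 * and fencing the SDMA ring at both endpoints, presumably because the
 * GPU-side timestamps used by SI_QUERY_TIME_ELAPSED_SDMA are not usable on
 * that generation.
 */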

static bool si_query_sw_begin(struct si_context *sctx,
                              struct si_query *squery)
{
   struct si_query_sw *query = (struct si_query_sw *)squery;
   enum radeon_value_id ws_id;

   switch(query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_GPU_FINISHED:
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA_SI:
      query->begin_result = si_finish_dma_get_cpu_time(sctx);
      break;
   case SI_QUERY_DRAW_CALLS:
      query->begin_result = sctx->num_draw_calls;
      break;
   case SI_QUERY_DECOMPRESS_CALLS:
      query->begin_result = sctx->num_decompress_calls;
      break;
   case SI_QUERY_MRT_DRAW_CALLS:
      query->begin_result = sctx->num_mrt_draw_calls;
      break;
   case SI_QUERY_PRIM_RESTART_CALLS:
      query->begin_result = sctx->num_prim_restart_calls;
      break;
   case SI_QUERY_SPILL_DRAW_CALLS:
      query->begin_result = sctx->num_spill_draw_calls;
      break;
   case SI_QUERY_COMPUTE_CALLS:
      query->begin_result = sctx->num_compute_calls;
      break;
   case SI_QUERY_SPILL_COMPUTE_CALLS:
      query->begin_result = sctx->num_spill_compute_calls;
      break;
   case SI_QUERY_DMA_CALLS:
      query->begin_result = sctx->num_dma_calls;
      break;
   case SI_QUERY_CP_DMA_CALLS:
      query->begin_result = sctx->num_cp_dma_calls;
      break;
   case SI_QUERY_NUM_VS_FLUSHES:
      query->begin_result = sctx->num_vs_flushes;
      break;
   case SI_QUERY_NUM_PS_FLUSHES:
      query->begin_result = sctx->num_ps_flushes;
      break;
   case SI_QUERY_NUM_CS_FLUSHES:
      query->begin_result = sctx->num_cs_flushes;
      break;
   case SI_QUERY_NUM_CB_CACHE_FLUSHES:
      query->begin_result = sctx->num_cb_cache_flushes;
      break;
   case SI_QUERY_NUM_DB_CACHE_FLUSHES:
      query->begin_result = sctx->num_db_cache_flushes;
      break;
   case SI_QUERY_NUM_L2_INVALIDATES:
      query->begin_result = sctx->num_L2_invalidates;
      break;
   case SI_QUERY_NUM_L2_WRITEBACKS:
      query->begin_result = sctx->num_L2_writebacks;
      break;
   case SI_QUERY_NUM_RESIDENT_HANDLES:
      query->begin_result = sctx->num_resident_handles;
      break;
   case SI_QUERY_TC_OFFLOADED_SLOTS:
      query->begin_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
      break;
   case SI_QUERY_TC_DIRECT_SLOTS:
      query->begin_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
      break;
   case SI_QUERY_TC_NUM_SYNCS:
      query->begin_result = sctx->tc ? sctx->tc->num_syncs : 0;
      break;
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_MAPPED_VRAM:
   case SI_QUERY_MAPPED_GTT:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_VRAM_VIS_USAGE:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_GPU_TEMPERATURE:
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
   case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
   case SI_QUERY_NUM_MAPPED_BUFFERS:
      query->begin_result = 0;
      break;
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GFX_IB_SIZE:
   case SI_QUERY_NUM_GFX_IBS:
   case SI_QUERY_NUM_SDMA_IBS:
   case SI_QUERY_NUM_BYTES_MOVED:
   case SI_QUERY_NUM_EVICTIONS:
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
      enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      break;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->begin_time = sctx->ws->query_value(sctx->ws,
                                                RADEON_NUM_GFX_IBS);
      break;
   case SI_QUERY_CS_THREAD_BUSY:
      ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->begin_time = os_time_get_nano();
      break;
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      query->begin_result =
         sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
      query->begin_time = os_time_get_nano();
      break;
   case SI_QUERY_GPU_LOAD:
   case SI_QUERY_GPU_SHADERS_BUSY:
   case SI_QUERY_GPU_TA_BUSY:
   case SI_QUERY_GPU_GDS_BUSY:
   case SI_QUERY_GPU_VGT_BUSY:
   case SI_QUERY_GPU_IA_BUSY:
   case SI_QUERY_GPU_SX_BUSY:
   case SI_QUERY_GPU_WD_BUSY:
   case SI_QUERY_GPU_BCI_BUSY:
   case SI_QUERY_GPU_SC_BUSY:
   case SI_QUERY_GPU_PA_BUSY:
   case SI_QUERY_GPU_DB_BUSY:
   case SI_QUERY_GPU_CP_BUSY:
   case SI_QUERY_GPU_CB_BUSY:
   case SI_QUERY_GPU_SDMA_BUSY:
   case SI_QUERY_GPU_PFP_BUSY:
   case SI_QUERY_GPU_MEQ_BUSY:
   case SI_QUERY_GPU_ME_BUSY:
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
   case SI_QUERY_GPU_CP_DMA_BUSY:
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      query->begin_result = si_begin_counter(sctx->screen,
                                             query->b.type);
      break;
   case SI_QUERY_NUM_COMPILATIONS:
      query->begin_result = p_atomic_read(&sctx->screen->num_compilations);
      break;
   case SI_QUERY_NUM_SHADERS_CREATED:
      query->begin_result = p_atomic_read(&sctx->screen->num_shaders_created);
      break;
   case SI_QUERY_NUM_SHADER_CACHE_HITS:
      query->begin_result =
         p_atomic_read(&sctx->screen->num_shader_cache_hits);
      break;
   case SI_QUERY_PD_NUM_PRIMS_ACCEPTED:
      query->begin_result = sctx->compute_num_verts_accepted;
      break;
   case SI_QUERY_PD_NUM_PRIMS_REJECTED:
      query->begin_result = sctx->compute_num_verts_rejected;
      break;
   case SI_QUERY_PD_NUM_PRIMS_INELIGIBLE:
      query->begin_result = sctx->compute_num_verts_ineligible;
      break;
   case SI_QUERY_GPIN_ASIC_ID:
   case SI_QUERY_GPIN_NUM_SIMD:
   case SI_QUERY_GPIN_NUM_RB:
   case SI_QUERY_GPIN_NUM_SPI:
   case SI_QUERY_GPIN_NUM_SE:
      break;
   default:
      unreachable("si_query_sw_begin: bad query type");
   }

   return true;
}

static bool si_query_sw_end(struct si_context *sctx,
                            struct si_query *squery)
{
   struct si_query_sw *query = (struct si_query_sw *)squery;
   enum radeon_value_id ws_id;

   switch(query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      break;
   case PIPE_QUERY_GPU_FINISHED:
      sctx->b.flush(&sctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA_SI:
      query->end_result = si_finish_dma_get_cpu_time(sctx);
      break;
   case SI_QUERY_DRAW_CALLS:
      query->end_result = sctx->num_draw_calls;
      break;
   case SI_QUERY_DECOMPRESS_CALLS:
      query->end_result = sctx->num_decompress_calls;
      break;
   case SI_QUERY_MRT_DRAW_CALLS:
      query->end_result = sctx->num_mrt_draw_calls;
      break;
   case SI_QUERY_PRIM_RESTART_CALLS:
      query->end_result = sctx->num_prim_restart_calls;
      break;
   case SI_QUERY_SPILL_DRAW_CALLS:
      query->end_result = sctx->num_spill_draw_calls;
      break;
   case SI_QUERY_COMPUTE_CALLS:
      query->end_result = sctx->num_compute_calls;
      break;
   case SI_QUERY_SPILL_COMPUTE_CALLS:
      query->end_result = sctx->num_spill_compute_calls;
      break;
   case SI_QUERY_DMA_CALLS:
      query->end_result = sctx->num_dma_calls;
      break;
   case SI_QUERY_CP_DMA_CALLS:
      query->end_result = sctx->num_cp_dma_calls;
      break;
   case SI_QUERY_NUM_VS_FLUSHES:
      query->end_result = sctx->num_vs_flushes;
      break;
   case SI_QUERY_NUM_PS_FLUSHES:
      query->end_result = sctx->num_ps_flushes;
      break;
   case SI_QUERY_NUM_CS_FLUSHES:
      query->end_result = sctx->num_cs_flushes;
      break;
   case SI_QUERY_NUM_CB_CACHE_FLUSHES:
      query->end_result = sctx->num_cb_cache_flushes;
      break;
   case SI_QUERY_NUM_DB_CACHE_FLUSHES:
      query->end_result = sctx->num_db_cache_flushes;
      break;
   case SI_QUERY_NUM_L2_INVALIDATES:
      query->end_result = sctx->num_L2_invalidates;
      break;
   case SI_QUERY_NUM_L2_WRITEBACKS:
      query->end_result = sctx->num_L2_writebacks;
      break;
   case SI_QUERY_NUM_RESIDENT_HANDLES:
      query->end_result = sctx->num_resident_handles;
      break;
   case SI_QUERY_TC_OFFLOADED_SLOTS:
      query->end_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
      break;
   case SI_QUERY_TC_DIRECT_SLOTS:
      query->end_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
      break;
   case SI_QUERY_TC_NUM_SYNCS:
      query->end_result = sctx->tc ? sctx->tc->num_syncs : 0;
      break;
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_MAPPED_VRAM:
   case SI_QUERY_MAPPED_GTT:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_VRAM_VIS_USAGE:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_GPU_TEMPERATURE:
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GFX_IB_SIZE:
   case SI_QUERY_NUM_MAPPED_BUFFERS:
   case SI_QUERY_NUM_GFX_IBS:
   case SI_QUERY_NUM_SDMA_IBS:
   case SI_QUERY_NUM_BYTES_MOVED:
   case SI_QUERY_NUM_EVICTIONS:
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
      enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      break;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->end_time = sctx->ws->query_value(sctx->ws,
                                              RADEON_NUM_GFX_IBS);
      break;
   case SI_QUERY_CS_THREAD_BUSY:
      ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->end_time = os_time_get_nano();
      break;
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      query->end_result =
         sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
      query->end_time = os_time_get_nano();
      break;
   case SI_QUERY_GPU_LOAD:
   case SI_QUERY_GPU_SHADERS_BUSY:
   case SI_QUERY_GPU_TA_BUSY:
   case SI_QUERY_GPU_GDS_BUSY:
   case SI_QUERY_GPU_VGT_BUSY:
   case SI_QUERY_GPU_IA_BUSY:
   case SI_QUERY_GPU_SX_BUSY:
   case SI_QUERY_GPU_WD_BUSY:
   case SI_QUERY_GPU_BCI_BUSY:
   case SI_QUERY_GPU_SC_BUSY:
   case SI_QUERY_GPU_PA_BUSY:
   case SI_QUERY_GPU_DB_BUSY:
   case SI_QUERY_GPU_CP_BUSY:
   case SI_QUERY_GPU_CB_BUSY:
   case SI_QUERY_GPU_SDMA_BUSY:
   case SI_QUERY_GPU_PFP_BUSY:
   case SI_QUERY_GPU_MEQ_BUSY:
   case SI_QUERY_GPU_ME_BUSY:
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
   case SI_QUERY_GPU_CP_DMA_BUSY:
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      query->end_result = si_end_counter(sctx->screen,
                                         query->b.type,
                                         query->begin_result);
      query->begin_result = 0;
      break;
   case SI_QUERY_NUM_COMPILATIONS:
      query->end_result = p_atomic_read(&sctx->screen->num_compilations);
      break;
   case SI_QUERY_NUM_SHADERS_CREATED:
      query->end_result = p_atomic_read(&sctx->screen->num_shaders_created);
      break;
   case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
      query->end_result = sctx->last_tex_ps_draw_ratio;
      break;
   case SI_QUERY_NUM_SHADER_CACHE_HITS:
      query->end_result =
         p_atomic_read(&sctx->screen->num_shader_cache_hits);
      break;
   case SI_QUERY_PD_NUM_PRIMS_ACCEPTED:
      query->end_result = sctx->compute_num_verts_accepted;
      break;
   case SI_QUERY_PD_NUM_PRIMS_REJECTED:
      query->end_result = sctx->compute_num_verts_rejected;
      break;
   case SI_QUERY_PD_NUM_PRIMS_INELIGIBLE:
      query->end_result = sctx->compute_num_verts_ineligible;
      break;
   case SI_QUERY_GPIN_ASIC_ID:
   case SI_QUERY_GPIN_NUM_SIMD:
   case SI_QUERY_GPIN_NUM_RB:
   case SI_QUERY_GPIN_NUM_SPI:
   case SI_QUERY_GPIN_NUM_SE:
      break;
   default:
      unreachable("si_query_sw_end: bad query type");
   }

   return true;
}

static bool si_query_sw_get_result(struct si_context *sctx,
                                   struct si_query *squery,
                                   bool wait,
                                   union pipe_query_result *result)
{
   struct si_query_sw *query = (struct si_query_sw *)squery;

   switch (query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* Convert from cycles per millisecond to cycles per second (Hz). */
      result->timestamp_disjoint.frequency =
         (uint64_t)sctx->screen->info.clock_crystal_freq * 1000;
      result->timestamp_disjoint.disjoint = false;
      return true;
   case PIPE_QUERY_GPU_FINISHED: {
      struct pipe_screen *screen = sctx->b.screen;
      struct pipe_context *ctx = squery->b.flushed ? NULL : &sctx->b;

      result->b = screen->fence_finish(screen, ctx, query->fence,
                                       wait ? PIPE_TIMEOUT_INFINITE : 0);
      return result->b;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      result->u64 = (query->end_result - query->begin_result) /
                    (query->end_time - query->begin_time);
      return true;
   case SI_QUERY_CS_THREAD_BUSY:
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      result->u64 = (query->end_result - query->begin_result) * 100 /
                    (query->end_time - query->begin_time);
      return true;
   case SI_QUERY_PD_NUM_PRIMS_ACCEPTED:
   case SI_QUERY_PD_NUM_PRIMS_REJECTED:
   case SI_QUERY_PD_NUM_PRIMS_INELIGIBLE:
      result->u64 = ((unsigned)query->end_result -
                     (unsigned)query->begin_result) / 3;
      return true;
   case SI_QUERY_GPIN_ASIC_ID:
      result->u32 = 0;
      return true;
   case SI_QUERY_GPIN_NUM_SIMD:
      result->u32 = sctx->screen->info.num_good_compute_units;
      return true;
   case SI_QUERY_GPIN_NUM_RB:
      result->u32 = sctx->screen->info.num_render_backends;
      return true;
   case SI_QUERY_GPIN_NUM_SPI:
      result->u32 = 1; /* all supported chips have one SPI per SE */
      return true;
   case SI_QUERY_GPIN_NUM_SE:
      result->u32 = sctx->screen->info.max_se;
      return true;
   }

   result->u64 = query->end_result - query->begin_result;

   switch (query->b.type) {
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GPU_TEMPERATURE:
      result->u64 /= 1000;
      break;
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
      result->u64 *= 1000000;
      break;
   }

   return true;
}
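
/* Worked example (illustration only; the units are assumptions about the
 * winsys, not taken from this file): if GPU_TEMPERATURE is reported in
 * millidegrees, 56000 / 1000 is returned as 56; if CURRENT_GPU_SCLK comes
 * back in MHz, 800 * 1000000 expresses 800 MHz in Hz, matching the HZ result
 * type advertised for shader-clock/memory-clock in si_driver_query_list
 * below.
 */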

static const struct si_query_ops sw_query_ops = {
   .destroy = si_query_sw_destroy,
   .begin = si_query_sw_begin,
   .end = si_query_sw_end,
   .get_result = si_query_sw_get_result,
   .get_result_resource = NULL
};

static struct pipe_query *si_query_sw_create(unsigned query_type)
{
   struct si_query_sw *query;

   query = CALLOC_STRUCT(si_query_sw);
   if (!query)
      return NULL;

   query->b.type = query_type;
   query->b.ops = &sw_query_ops;

   return (struct pipe_query *)query;
}

void si_query_buffer_destroy(struct si_screen *sscreen, struct si_query_buffer *buffer)
{
   struct si_query_buffer *prev = buffer->previous;

   /* Release all query buffers. */
   while (prev) {
      struct si_query_buffer *qbuf = prev;
      prev = prev->previous;
      si_resource_reference(&qbuf->buf, NULL);
      FREE(qbuf);
   }

   si_resource_reference(&buffer->buf, NULL);
}

void si_query_buffer_reset(struct si_context *sctx, struct si_query_buffer *buffer)
{
   /* Discard all query buffers except for the oldest. */
   while (buffer->previous) {
      struct si_query_buffer *qbuf = buffer->previous;
      buffer->previous = qbuf->previous;

      si_resource_reference(&buffer->buf, NULL);
      buffer->buf = qbuf->buf; /* move ownership */
      FREE(qbuf);
   }

   buffer->results_end = 0;

   if (!buffer->buf)
      return;

   /* Discard even the oldest buffer if it can't be mapped without a stall. */
   if (si_rings_is_buffer_referenced(sctx, buffer->buf->buf, RADEON_USAGE_READWRITE) ||
       !sctx->ws->buffer_wait(buffer->buf->buf, 0, RADEON_USAGE_READWRITE)) {
      si_resource_reference(&buffer->buf, NULL);
   } else {
      buffer->unprepared = true;
   }
}

bool si_query_buffer_alloc(struct si_context *sctx, struct si_query_buffer *buffer,
                           bool (*prepare_buffer)(struct si_context *, struct si_query_buffer *),
                           unsigned size)
{
   bool unprepared = buffer->unprepared;
   buffer->unprepared = false;

   if (!buffer->buf || buffer->results_end + size > buffer->buf->b.b.width0) {
      if (buffer->buf) {
         struct si_query_buffer *qbuf = MALLOC_STRUCT(si_query_buffer);
         memcpy(qbuf, buffer, sizeof(*qbuf));
         buffer->previous = qbuf;
      }
      buffer->results_end = 0;

      /* Queries are normally read by the CPU after
       * being written by the GPU, hence staging is probably a good
       * usage pattern.
       */
      struct si_screen *screen = sctx->screen;
      unsigned buf_size = MAX2(size, screen->info.min_alloc_size);
      buffer->buf = si_resource(
         pipe_buffer_create(&screen->b, 0, PIPE_USAGE_STAGING, buf_size));
      if (unlikely(!buffer->buf))
         return false;
      unprepared = true;
   }

   if (unprepared && prepare_buffer) {
      if (unlikely(!prepare_buffer(sctx, buffer))) {
         si_resource_reference(&buffer->buf, NULL);
         return false;
      }
   }

   return true;
}
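
/* Illustration only: query buffers form a singly linked list with the newest
 * buffer in *buffer and older, full buffers chained through ->previous:
 *
 *    buffer (partially filled) -> full buf -> oldest full buf -> NULL
 *
 * si_query_buffer_alloc() pushes the current buffer onto that chain when the
 * next result would not fit, and si_query_buffer_reset() frees everything
 * except one reusable buffer.
 */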

void si_query_hw_destroy(struct si_context *sctx, struct si_query *squery)
{
   struct si_query_hw *query = (struct si_query_hw *)squery;

   si_query_buffer_destroy(sctx->screen, &query->buffer);
   si_resource_reference(&query->workaround_buf, NULL);
   FREE(squery);
}

static bool si_query_hw_prepare_buffer(struct si_context *sctx,
                                       struct si_query_buffer *qbuf)
{
   static const struct si_query_hw si_query_hw_s;
   struct si_query_hw *query = container_of(qbuf, &si_query_hw_s, buffer);
   struct si_screen *screen = sctx->screen;

   /* The caller ensures that the buffer is currently unused by the GPU. */
   uint32_t *results = screen->ws->buffer_map(qbuf->buf->buf, NULL,
                                              PIPE_TRANSFER_WRITE |
                                              PIPE_TRANSFER_UNSYNCHRONIZED);
   if (!results)
      return false;

   memset(results, 0, qbuf->buf->b.b.width0);

   if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
      unsigned max_rbs = screen->info.num_render_backends;
      unsigned enabled_rb_mask = screen->info.enabled_rb_mask;
      unsigned num_results;
      unsigned i, j;

      /* Set top bits for unused backends. */
      num_results = qbuf->buf->b.b.width0 / query->result_size;
      for (j = 0; j < num_results; j++) {
         for (i = 0; i < max_rbs; i++) {
            if (!(enabled_rb_mask & (1<<i))) {
               results[(i * 4)+1] = 0x80000000;
               results[(i * 4)+3] = 0x80000000;
            }
         }
         results += 4 * max_rbs;
      }
   }

   return true;
}
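
/* Illustration only: for occlusion queries every render backend (RB) owns a
 * 16-byte slot of four dwords {begin_lo, begin_hi, end_lo, end_hi}; the GPU
 * sets the top bit of the hi dwords when it writes a sample, which is what
 * si_query_read_result() tests below. Pre-setting 0x80000000 for disabled RBs
 * makes their (zero) results always look ready, so they never hold back the
 * status check.
 */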

static void si_query_hw_get_result_resource(struct si_context *sctx,
                                            struct si_query *squery,
                                            bool wait,
                                            enum pipe_query_value_type result_type,
                                            int index,
                                            struct pipe_resource *resource,
                                            unsigned offset);

static void si_query_hw_do_emit_start(struct si_context *sctx,
                                      struct si_query_hw *query,
                                      struct si_resource *buffer,
                                      uint64_t va);
static void si_query_hw_do_emit_stop(struct si_context *sctx,
                                     struct si_query_hw *query,
                                     struct si_resource *buffer,
                                     uint64_t va);
static void si_query_hw_add_result(struct si_screen *sscreen,
                                   struct si_query_hw *, void *buffer,
                                   union pipe_query_result *result);
static void si_query_hw_clear_result(struct si_query_hw *,
                                     union pipe_query_result *);

static struct si_query_hw_ops query_hw_default_hw_ops = {
   .prepare_buffer = si_query_hw_prepare_buffer,
   .emit_start = si_query_hw_do_emit_start,
   .emit_stop = si_query_hw_do_emit_stop,
   .clear_result = si_query_hw_clear_result,
   .add_result = si_query_hw_add_result,
};

static struct pipe_query *si_query_hw_create(struct si_screen *sscreen,
                                             unsigned query_type,
                                             unsigned index)
{
   struct si_query_hw *query = CALLOC_STRUCT(si_query_hw);
   if (!query)
      return NULL;

   query->b.type = query_type;
   query->b.ops = &query_hw_ops;
   query->ops = &query_hw_default_hw_ops;

   switch (query_type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      query->result_size = 16 * sscreen->info.num_render_backends;
      query->result_size += 16; /* for the fence + alignment */
      query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA:
      /* GET_GLOBAL_TIMESTAMP only works if the offset is a multiple of 32. */
      query->result_size = 64;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      query->result_size = 24;
      query->b.num_cs_dw_suspend = 8 + si_cp_write_fence_dwords(sscreen);
      break;
   case PIPE_QUERY_TIMESTAMP:
      query->result_size = 16;
      query->b.num_cs_dw_suspend = 8 + si_cp_write_fence_dwords(sscreen);
      query->flags = SI_QUERY_HW_FLAG_NO_START;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
      query->result_size = 32;
      query->b.num_cs_dw_suspend = 6;
      query->stream = index;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
      query->result_size = 32 * SI_MAX_STREAMS;
      query->b.num_cs_dw_suspend = 6 * SI_MAX_STREAMS;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* 11 values on GCN. */
      query->result_size = 11 * 16;
      query->result_size += 8; /* for the fence + alignment */
      query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
      break;
   default:
      assert(0);
      FREE(query);
      return NULL;
   }

   return (struct pipe_query *)query;
}
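
/* Illustration only: result_size above is the footprint of one begin/end
 * sample pair in the query buffer. E.g. with 4 render backends an occlusion
 * query takes 16 * 4 + 16 = 80 bytes: a begin/end pair of 64-bit counters per
 * RB plus room for the fence dword and alignment.
 */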

static void si_update_occlusion_query_state(struct si_context *sctx,
                                            unsigned type, int diff)
{
   if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
       type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
      bool old_enable = sctx->num_occlusion_queries != 0;
      bool old_perfect_enable =
         sctx->num_perfect_occlusion_queries != 0;
      bool enable, perfect_enable;

      sctx->num_occlusion_queries += diff;
      assert(sctx->num_occlusion_queries >= 0);

      if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
         sctx->num_perfect_occlusion_queries += diff;
         assert(sctx->num_perfect_occlusion_queries >= 0);
      }

      enable = sctx->num_occlusion_queries != 0;
      perfect_enable = sctx->num_perfect_occlusion_queries != 0;

      if (enable != old_enable || perfect_enable != old_perfect_enable) {
         si_set_occlusion_query_state(sctx, old_perfect_enable);
      }
   }
}

static unsigned event_type_for_stream(unsigned stream)
{
   switch (stream) {
   default:
   case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
   case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
   case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
   case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
   }
}

static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
                                  unsigned stream)
{
   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
   radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
   radeon_emit(cs, va);
   radeon_emit(cs, va >> 32);
}
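
/* Illustration only: the packet emitted above is four dwords,
 *
 *    PKT3(PKT3_EVENT_WRITE, 2, 0)
 *    EVENT_TYPE(SAMPLE_STREAMOUTSTATS[n]) | EVENT_INDEX(3)
 *    va & 0xffffffff
 *    va >> 32
 *
 * which asks the GPU to dump the streamout statistics of the given stream to
 * the address va.
 */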

static void si_query_hw_do_emit_start(struct si_context *sctx,
                                      struct si_query_hw *query,
                                      struct si_resource *buffer,
                                      uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   switch (query->b.type) {
   case SI_QUERY_TIME_ELAPSED_SDMA:
      si_dma_emit_timestamp(sctx, buffer, va - buffer->gpu_address);
      return;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      emit_sample_streamout(cs, va, query->stream);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
         emit_sample_streamout(cs, va + 32 * stream, stream);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                        EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                        EOP_DATA_SEL_TIMESTAMP, NULL, va,
                        0, query->b.type);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   default:
      assert(0);
   }
   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
                             RADEON_PRIO_QUERY);
}

static void si_query_hw_emit_start(struct si_context *sctx,
                                   struct si_query_hw *query)
{
   uint64_t va;

   if (!si_query_buffer_alloc(sctx, &query->buffer, query->ops->prepare_buffer,
                              query->result_size))
      return;

   si_update_occlusion_query_state(sctx, query->b.type, 1);
   si_update_prims_generated_query_state(sctx, query->b.type, 1);

   if (query->b.type == PIPE_QUERY_PIPELINE_STATISTICS)
      sctx->num_pipeline_stat_queries++;

   if (query->b.type != SI_QUERY_TIME_ELAPSED_SDMA)
      si_need_gfx_cs_space(sctx);

   va = query->buffer.buf->gpu_address + query->buffer.results_end;
   query->ops->emit_start(sctx, query, query->buffer.buf, va);
}

static void si_query_hw_do_emit_stop(struct si_context *sctx,
                                     struct si_query_hw *query,
                                     struct si_resource *buffer,
                                     uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   uint64_t fence_va = 0;

   switch (query->b.type) {
   case SI_QUERY_TIME_ELAPSED_SDMA:
      si_dma_emit_timestamp(sctx, buffer, va + 32 - buffer->gpu_address);
      return;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      va += 8;
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      fence_va = va + sctx->screen->info.num_render_backends * 16 - 8;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      va += 16;
      emit_sample_streamout(cs, va, query->stream);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      va += 16;
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
         emit_sample_streamout(cs, va + 32 * stream, stream);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      va += 8;
      /* fall through */
   case PIPE_QUERY_TIMESTAMP:
      si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                        EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                        EOP_DATA_SEL_TIMESTAMP, NULL, va,
                        0, query->b.type);
      fence_va = va + 8;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS: {
      unsigned sample_size = (query->result_size - 8) / 2;

      va += sample_size;
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      fence_va = va + sample_size;
      break;
   }
   default:
      assert(0);
   }
   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
                             RADEON_PRIO_QUERY);

   if (fence_va) {
      si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                        EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                        EOP_DATA_SEL_VALUE_32BIT,
                        query->buffer.buf, fence_va, 0x80000000,
                        query->b.type);
   }
}

static void si_query_hw_emit_stop(struct si_context *sctx,
                                  struct si_query_hw *query)
{
   uint64_t va;

   /* The queries which need begin already called this in begin_query. */
   if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
      si_need_gfx_cs_space(sctx);
      if (!si_query_buffer_alloc(sctx, &query->buffer, query->ops->prepare_buffer,
                                 query->result_size))
         return;
   }

   if (!query->buffer.buf)
      return; // previous buffer allocation failure

   /* emit end query */
   va = query->buffer.buf->gpu_address + query->buffer.results_end;

   query->ops->emit_stop(sctx, query, query->buffer.buf, va);

   query->buffer.results_end += query->result_size;

   si_update_occlusion_query_state(sctx, query->b.type, -1);
   si_update_prims_generated_query_state(sctx, query->b.type, -1);

   if (query->b.type == PIPE_QUERY_PIPELINE_STATISTICS)
      sctx->num_pipeline_stat_queries--;
}

static void emit_set_predicate(struct si_context *ctx,
                               struct si_resource *buf, uint64_t va,
                               uint32_t op)
{
   struct radeon_cmdbuf *cs = ctx->gfx_cs;

   if (ctx->chip_class >= GFX9) {
      radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
      radeon_emit(cs, op);
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
   } else {
      radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
      radeon_emit(cs, va);
      radeon_emit(cs, op | ((va >> 32) & 0xFF));
   }
   radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
                             RADEON_PRIO_QUERY);
}
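
/* Illustration only: on GFX9+ SET_PREDICATION carries the operation in its
 * own dword (header, op, va_lo, va_hi), while older chips pack the op bits
 * into the upper-address dword (header, va_lo, op | high 8 bits of va), which
 * is why the two paths above differ in packet length.
 */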

static void si_emit_query_predication(struct si_context *ctx)
{
   struct si_query_hw *query = (struct si_query_hw *)ctx->render_cond;
   struct si_query_buffer *qbuf;
   uint32_t op;
   bool flag_wait, invert;

   if (!query)
      return;

   if (ctx->screen->use_ngg_streamout &&
       (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
        query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)) {
      assert(!"not implemented");
   }

   invert = ctx->render_cond_invert;
   flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
               ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

   if (query->workaround_buf) {
      op = PRED_OP(PREDICATION_OP_BOOL64);
   } else {
      switch (query->b.type) {
      case PIPE_QUERY_OCCLUSION_COUNTER:
      case PIPE_QUERY_OCCLUSION_PREDICATE:
      case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
         op = PRED_OP(PREDICATION_OP_ZPASS);
         break;
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
         op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
         invert = !invert;
         break;
      default:
         assert(false);
         return;
      }
   }

   /* if true then invert, see GL_ARB_conditional_render_inverted */
   if (invert)
      op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
   else
      op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

   /* Use the value written by compute shader as a workaround. Note that
    * the wait flag does not apply in this predication mode.
    *
    * The shader outputs the result value to L2. Workarounds only affect GFX8
    * and later, where the CP reads data from L2, so we don't need an
    * additional flush.
    */
   if (query->workaround_buf) {
      uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
      emit_set_predicate(ctx, query->workaround_buf, va, op);
      return;
   }

   op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

   /* emit predicate packets for all data blocks */
   for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned results_base = 0;
      uint64_t va_base = qbuf->buf->gpu_address;

      while (results_base < qbuf->results_end) {
         uint64_t va = va_base + results_base;

         if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
            for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
               emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

               /* set CONTINUE bit for all packets except the first */
               op |= PREDICATION_CONTINUE;
            }
         } else {
            emit_set_predicate(ctx, qbuf->buf, va, op);
            op |= PREDICATION_CONTINUE;
         }

         results_base += query->result_size;
      }
   }
}

static struct pipe_query *si_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
   struct si_screen *sscreen =
      (struct si_screen *)ctx->screen;

   if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
       query_type == PIPE_QUERY_GPU_FINISHED ||
       (query_type >= PIPE_QUERY_DRIVER_SPECIFIC &&
        query_type != SI_QUERY_TIME_ELAPSED_SDMA))
      return si_query_sw_create(query_type);

   if (sscreen->use_ngg_streamout &&
       (query_type == PIPE_QUERY_PRIMITIVES_EMITTED ||
        query_type == PIPE_QUERY_PRIMITIVES_GENERATED ||
        query_type == PIPE_QUERY_SO_STATISTICS ||
        query_type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
        query_type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE))
      return gfx10_sh_query_create(sscreen, query_type, index);

   return si_query_hw_create(sscreen, query_type, index);
}

static void si_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *squery = (struct si_query *)query;

   squery->ops->destroy(sctx, squery);
}

static bool si_begin_query(struct pipe_context *ctx,
                           struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *squery = (struct si_query *)query;

   return squery->ops->begin(sctx, squery);
}

bool si_query_hw_begin(struct si_context *sctx,
                       struct si_query *squery)
{
   struct si_query_hw *query = (struct si_query_hw *)squery;

   if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
      assert(0);
      return false;
   }

   if (!(query->flags & SI_QUERY_HW_FLAG_BEGIN_RESUMES))
      si_query_buffer_reset(sctx, &query->buffer);

   si_resource_reference(&query->workaround_buf, NULL);

   si_query_hw_emit_start(sctx, query);
   if (!query->buffer.buf)
      return false;

   LIST_ADDTAIL(&query->b.active_list, &sctx->active_queries);
   sctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;
   return true;
}

static bool si_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *squery = (struct si_query *)query;

   return squery->ops->end(sctx, squery);
}

bool si_query_hw_end(struct si_context *sctx,
                     struct si_query *squery)
{
   struct si_query_hw *query = (struct si_query_hw *)squery;

   if (query->flags & SI_QUERY_HW_FLAG_NO_START)
      si_query_buffer_reset(sctx, &query->buffer);

   si_query_hw_emit_stop(sctx, query);

   if (!(query->flags & SI_QUERY_HW_FLAG_NO_START)) {
      LIST_DELINIT(&query->b.active_list);
      sctx->num_cs_dw_queries_suspend -= query->b.num_cs_dw_suspend;
   }

   if (!query->buffer.buf)
      return false;

   return true;
}

static void si_get_hw_query_params(struct si_context *sctx,
                                   struct si_query_hw *squery, int index,
                                   struct si_hw_query_params *params)
{
   unsigned max_rbs = sctx->screen->info.num_render_backends;

   params->pair_stride = 0;
   params->pair_count = 1;

   switch (squery->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      params->start_offset = 0;
      params->end_offset = 8;
      params->fence_offset = max_rbs * 16;
      params->pair_stride = 16;
      params->pair_count = max_rbs;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      params->start_offset = 0;
      params->end_offset = 8;
      params->fence_offset = 16;
      break;
   case PIPE_QUERY_TIMESTAMP:
      params->start_offset = 0;
      params->end_offset = 0;
      params->fence_offset = 8;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      params->start_offset = 8;
      params->end_offset = 24;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      params->start_offset = 0;
      params->end_offset = 16;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      params->start_offset = 8 - index * 8;
      params->end_offset = 24 - index * 8;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      params->pair_count = SI_MAX_STREAMS;
      params->pair_stride = 32;
      /* fall through */
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      params->start_offset = 0;
      params->end_offset = 16;

      /* We can re-use the high dword of the last 64-bit value as a
       * fence: it is initialized as 0, and the high bit is set by
       * the write of the streamout stats event.
       */
      params->fence_offset = squery->result_size - 4;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS: {
      static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
      params->start_offset = offsets[index];
      params->end_offset = 88 + offsets[index];
      params->fence_offset = 2 * 88;
      break;
   }
   default:
      unreachable("si_get_hw_query_params unsupported");
   }
}
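
/* Worked example (illustration only): one PIPE_QUERY_PIPELINE_STATISTICS
 * sample holds 11 counters * 8 bytes = 88 bytes, so the begin value of
 * counter `index` sits at offsets[index], the matching end value at
 * 88 + offsets[index], and the fence dword right after both samples at
 * 2 * 88 = 176, exactly the values computed above.
 */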

static unsigned si_query_read_result(void *map, unsigned start_index, unsigned end_index,
                                     bool test_status_bit)
{
   uint32_t *current_result = (uint32_t*)map;
   uint64_t start, end;

   start = (uint64_t)current_result[start_index] |
           (uint64_t)current_result[start_index+1] << 32;
   end = (uint64_t)current_result[end_index] |
         (uint64_t)current_result[end_index+1] << 32;

   if (!test_status_bit ||
       ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
      return end - start;
   }
   return 0;
}

static void si_query_hw_add_result(struct si_screen *sscreen,
                                   struct si_query_hw *query,
                                   void *buffer,
                                   union pipe_query_result *result)
{
   unsigned max_rbs = sscreen->info.num_render_backends;

   switch (query->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER: {
      for (unsigned i = 0; i < max_rbs; ++i) {
         unsigned results_base = i * 16;
         result->u64 +=
            si_query_read_result(buffer + results_base, 0, 2, true);
      }
      break;
   }
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
      for (unsigned i = 0; i < max_rbs; ++i) {
         unsigned results_base = i * 16;
         result->b = result->b ||
            si_query_read_result(buffer + results_base, 0, 2, true) != 0;
      }
      break;
   }
   case PIPE_QUERY_TIME_ELAPSED:
      result->u64 += si_query_read_result(buffer, 0, 2, false);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA:
      result->u64 += si_query_read_result(buffer, 0, 32/4, false);
      break;
   case PIPE_QUERY_TIMESTAMP:
      result->u64 = *(uint64_t*)buffer;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      /* SAMPLE_STREAMOUTSTATS stores this structure:
       * {
       *    u64 NumPrimitivesWritten;
       *    u64 PrimitiveStorageNeeded;
       * }
       * We only need NumPrimitivesWritten here. */
      result->u64 += si_query_read_result(buffer, 2, 6, true);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      /* Here we read PrimitiveStorageNeeded. */
      result->u64 += si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      result->so_statistics.num_primitives_written +=
         si_query_read_result(buffer, 2, 6, true);
      result->so_statistics.primitives_storage_needed +=
         si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      result->b = result->b ||
         si_query_read_result(buffer, 2, 6, true) !=
         si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
         result->b = result->b ||
            si_query_read_result(buffer, 2, 6, true) !=
            si_query_read_result(buffer, 0, 4, true);
         buffer = (char *)buffer + 32;
      }
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      result->pipeline_statistics.ps_invocations +=
         si_query_read_result(buffer, 0, 22, false);
      result->pipeline_statistics.c_primitives +=
         si_query_read_result(buffer, 2, 24, false);
      result->pipeline_statistics.c_invocations +=
         si_query_read_result(buffer, 4, 26, false);
      result->pipeline_statistics.vs_invocations +=
         si_query_read_result(buffer, 6, 28, false);
      result->pipeline_statistics.gs_invocations +=
         si_query_read_result(buffer, 8, 30, false);
      result->pipeline_statistics.gs_primitives +=
         si_query_read_result(buffer, 10, 32, false);
      result->pipeline_statistics.ia_primitives +=
         si_query_read_result(buffer, 12, 34, false);
      result->pipeline_statistics.ia_vertices +=
         si_query_read_result(buffer, 14, 36, false);
      result->pipeline_statistics.hs_invocations +=
         si_query_read_result(buffer, 16, 38, false);
      result->pipeline_statistics.ds_invocations +=
         si_query_read_result(buffer, 18, 40, false);
      result->pipeline_statistics.cs_invocations +=
         si_query_read_result(buffer, 20, 42, false);
#if 0 /* for testing */
      printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
             "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
             "Clipper prims=%llu, PS=%llu, CS=%llu\n",
             result->pipeline_statistics.ia_vertices,
             result->pipeline_statistics.ia_primitives,
             result->pipeline_statistics.vs_invocations,
             result->pipeline_statistics.hs_invocations,
             result->pipeline_statistics.ds_invocations,
             result->pipeline_statistics.gs_invocations,
             result->pipeline_statistics.gs_primitives,
             result->pipeline_statistics.c_invocations,
             result->pipeline_statistics.c_primitives,
             result->pipeline_statistics.ps_invocations,
             result->pipeline_statistics.cs_invocations);
#endif
      break;
   default:
      assert(0);
   }
}

void si_query_hw_suspend(struct si_context *sctx, struct si_query *query)
{
   si_query_hw_emit_stop(sctx, (struct si_query_hw *)query);
}

void si_query_hw_resume(struct si_context *sctx, struct si_query *query)
{
   si_query_hw_emit_start(sctx, (struct si_query_hw *)query);
}

static const struct si_query_ops query_hw_ops = {
   .destroy = si_query_hw_destroy,
   .begin = si_query_hw_begin,
   .end = si_query_hw_end,
   .get_result = si_query_hw_get_result,
   .get_result_resource = si_query_hw_get_result_resource,

   .suspend = si_query_hw_suspend,
   .resume = si_query_hw_resume,
};

static bool si_get_query_result(struct pipe_context *ctx,
                                struct pipe_query *query, bool wait,
                                union pipe_query_result *result)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *squery = (struct si_query *)query;

   return squery->ops->get_result(sctx, squery, wait, result);
}

static void si_get_query_result_resource(struct pipe_context *ctx,
                                         struct pipe_query *query,
                                         bool wait,
                                         enum pipe_query_value_type result_type,
                                         int index,
                                         struct pipe_resource *resource,
                                         unsigned offset)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *squery = (struct si_query *)query;

   squery->ops->get_result_resource(sctx, squery, wait, result_type, index,
                                    resource, offset);
}

static void si_query_hw_clear_result(struct si_query_hw *query,
                                     union pipe_query_result *result)
{
   util_query_clear_result(result, query->b.type);
}

bool si_query_hw_get_result(struct si_context *sctx,
                            struct si_query *squery,
                            bool wait, union pipe_query_result *result)
{
   struct si_screen *sscreen = sctx->screen;
   struct si_query_hw *query = (struct si_query_hw *)squery;
   struct si_query_buffer *qbuf;

   query->ops->clear_result(query, result);

   for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned usage = PIPE_TRANSFER_READ |
                       (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
      unsigned results_base = 0;
      void *map;

      if (squery->b.flushed)
         map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
      else
         map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);

      if (!map)
         return false;

      while (results_base != qbuf->results_end) {
         query->ops->add_result(sscreen, query, map + results_base,
                                result);
         results_base += query->result_size;
      }
   }

   /* Convert the time to expected units. */
   if (squery->type == PIPE_QUERY_TIME_ELAPSED ||
       squery->type == SI_QUERY_TIME_ELAPSED_SDMA ||
       squery->type == PIPE_QUERY_TIMESTAMP) {
      result->u64 = (1000000 * result->u64) / sscreen->info.clock_crystal_freq;
   }
   return true;
}
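
/* Worked example (illustration only, assuming clock_crystal_freq is in kHz):
 * a delta of 27000 crystal ticks with a 27000 kHz crystal gives
 * (1000000 * 27000) / 27000 = 1000000 ns = 1 ms, i.e. the conversion above
 * turns GPU ticks into the nanoseconds that gallium timestamp and
 * time-elapsed queries are specified to return.
 */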

static void si_query_hw_get_result_resource(struct si_context *sctx,
                                            struct si_query *squery,
                                            bool wait,
                                            enum pipe_query_value_type result_type,
                                            int index,
                                            struct pipe_resource *resource,
                                            unsigned offset)
{
   struct si_query_hw *query = (struct si_query_hw *)squery;
   struct si_query_buffer *qbuf;
   struct si_query_buffer *qbuf_prev;
   struct pipe_resource *tmp_buffer = NULL;
   unsigned tmp_buffer_offset = 0;
   struct si_qbo_state saved_state = {};
   struct pipe_grid_info grid = {};
   struct pipe_constant_buffer constant_buffer = {};
   struct pipe_shader_buffer ssbo[3];
   struct si_hw_query_params params;
   struct {
      uint32_t end_offset;
      uint32_t result_stride;
      uint32_t result_count;
      uint32_t config;
      uint32_t fence_offset;
      uint32_t pair_stride;
      uint32_t pair_count;
   } consts;

   if (!sctx->query_result_shader) {
      sctx->query_result_shader = si_create_query_result_cs(sctx);
      if (!sctx->query_result_shader)
         return;
   }

   if (query->buffer.previous) {
      u_suballocator_alloc(sctx->allocator_zeroed_memory, 16, 16,
                           &tmp_buffer_offset, &tmp_buffer);
      if (!tmp_buffer)
         return;
   }

   si_save_qbo_state(sctx, &saved_state);

   si_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
   consts.end_offset = params.end_offset - params.start_offset;
   consts.fence_offset = params.fence_offset - params.start_offset;
   consts.result_stride = query->result_size;
   consts.pair_stride = params.pair_stride;
   consts.pair_count = params.pair_count;

   constant_buffer.buffer_size = sizeof(consts);
   constant_buffer.user_buffer = &consts;

   ssbo[1].buffer = tmp_buffer;
   ssbo[1].buffer_offset = tmp_buffer_offset;
   ssbo[1].buffer_size = 16;

   ssbo[2] = ssbo[1];

   sctx->b.bind_compute_state(&sctx->b, sctx->query_result_shader);

   grid.block[0] = 1;
   grid.block[1] = 1;
   grid.block[2] = 1;
   grid.grid[0] = 1;
   grid.grid[1] = 1;
   grid.grid[2] = 1;

   consts.config = 0;
   if (index < 0)
      consts.config |= 4;
   if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
      consts.config |= 8;
   else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
            query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
      consts.config |= 8 | 256;
   else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
            query->b.type == PIPE_QUERY_TIME_ELAPSED)
      consts.config |= 32;

   switch (result_type) {
   case PIPE_QUERY_TYPE_U64:
   case PIPE_QUERY_TYPE_I64:
      consts.config |= 64;
      break;
   case PIPE_QUERY_TYPE_I32:
      consts.config |= 128;
      break;
   case PIPE_QUERY_TYPE_U32:
      break;
   }

   sctx->flags |= sctx->screen->barrier_flags.cp_to_L2;

   for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
      if (query->b.type != PIPE_QUERY_TIMESTAMP) {
         qbuf_prev = qbuf->previous;
         consts.result_count = qbuf->results_end / query->result_size;
         consts.config &= ~3;
         if (qbuf != &query->buffer)
            consts.config |= 1;
         if (qbuf->previous)
            consts.config |= 2;
      } else {
         /* Only read the last timestamp. */
         qbuf_prev = NULL;
         consts.result_count = 0;
         consts.config |= 16;
         params.start_offset += qbuf->results_end - query->result_size;
      }

      sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

      ssbo[0].buffer = &qbuf->buf->b.b;
      ssbo[0].buffer_offset = params.start_offset;
      ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

      if (!qbuf->previous) {
         ssbo[2].buffer = resource;
         ssbo[2].buffer_offset = offset;
         ssbo[2].buffer_size = 8;

         si_resource(resource)->TC_L2_dirty = true;
      }

      sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo,
                                 1 << 2);

      if (wait && qbuf == &query->buffer) {
         uint64_t va;

         /* Wait for result availability. Wait only for readiness
          * of the last entry, since the fence writes should be
          * serialized in the CP.
          */
         va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
         va += params.fence_offset;

         si_cp_wait_mem(sctx, sctx->gfx_cs, va, 0x80000000,
                        0x80000000, WAIT_REG_MEM_EQUAL);
      }

      sctx->b.launch_grid(&sctx->b, &grid);
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   si_restore_qbo_state(sctx, &saved_state);
   pipe_resource_reference(&tmp_buffer, NULL);
}
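
/* Illustration only: consts.config is a bit field consumed by the compute
 * shader built in si_create_query_result_cs(). As used above: bits 1 and 2
 * chain partial results across query buffers, 4 is set when index < 0,
 * 8 reduces the result to a boolean, 256 additionally compares the two
 * halves of each pair (stream overflow), 16 reads only the last timestamp,
 * 32 applies the timestamp conversion, 64 stores a full 64-bit result and
 * 128 a signed 32-bit one. The authoritative semantics live in the shader
 * itself.
 */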

static void si_render_condition(struct pipe_context *ctx,
                                struct pipe_query *query,
                                bool condition,
                                enum pipe_render_cond_flag mode)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query_hw *squery = (struct si_query_hw *)query;
   struct si_atom *atom = &sctx->atoms.s.render_cond;

   if (query) {
      bool needs_workaround = false;

      /* There was a firmware regression in GFX8 which causes successive
       * SET_PREDICATION packets to give the wrong answer for
       * non-inverted stream overflow predication.
       */
      if (((sctx->chip_class == GFX8 && sctx->screen->info.pfp_fw_feature < 49) ||
           (sctx->chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
          !condition &&
          (squery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
           (squery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
            (squery->buffer.previous ||
             squery->buffer.results_end > squery->result_size)))) {
         needs_workaround = true;
      }

      if (needs_workaround && !squery->workaround_buf) {
         bool old_force_off = sctx->render_cond_force_off;
         sctx->render_cond_force_off = true;

         u_suballocator_alloc(
            sctx->allocator_zeroed_memory, 8, 8,
            &squery->workaround_offset,
            (struct pipe_resource **)&squery->workaround_buf);

         /* Reset to NULL to avoid a redundant SET_PREDICATION
          * from launching the compute grid.
          */
         sctx->render_cond = NULL;

         ctx->get_query_result_resource(
            ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
            &squery->workaround_buf->b.b, squery->workaround_offset);

         /* Setting this in the render cond atom is too late,
          * so set it here. */
         sctx->flags |= sctx->screen->barrier_flags.L2_to_cp |
                        SI_CONTEXT_FLUSH_FOR_RENDER_COND;

         sctx->render_cond_force_off = old_force_off;
      }
   }

   sctx->render_cond = query;
   sctx->render_cond_invert = condition;
   sctx->render_cond_mode = mode;

   si_set_atom_dirty(sctx, atom, query != NULL);
}

void si_suspend_queries(struct si_context *sctx)
{
   struct si_query *query;

   LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, active_list)
      query->ops->suspend(sctx, query);
}

void si_resume_queries(struct si_context *sctx)
{
   struct si_query *query;

   /* Check CS space here. Resuming must not be interrupted by flushes. */
   si_need_gfx_cs_space(sctx);

   LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, active_list)
      query->ops->resume(sctx, query);
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
   { \
      .name = name_, \
      .query_type = SI_QUERY_##query_type_, \
      .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
      .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
      .group_id = group_id_ \
   }

#define X(name_, query_type_, type_, result_type_) \
   XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
   XFULL(name_, query_type_, type_, result_type_, SI_QUERY_GROUP_##group_)

static struct pipe_driver_query_info si_driver_query_list[] = {
   X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
   X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
   X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
   X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
   X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
   X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
   X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
   X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
   X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
   X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
   X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
   X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
   X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
   X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
   X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
   X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
   X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
   X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
   X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
   X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
   X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
   X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
   X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
   X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
   X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
   X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
   X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
   X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
   X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
   X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
   X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
   X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
   X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
   X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
   X("GFX-IB-size", GFX_IB_SIZE, UINT64, AVERAGE),
   X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
   X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
   X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
   X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
   X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
   X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
   X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

   /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
    * which use it as a fallback path to detect the GPU type.
    *
    * Note: The names of these queries are significant for GPUPerfStudio
    * (and possibly their order as well). */
   XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
   XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
   XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
   XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
   XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

   X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
   X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
   X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

   /* The following queries must be at the end of the list because their
    * availability is adjusted dynamically based on the DRM version. */
   X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
   X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
   X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
   X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
   X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
   X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
   X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
   X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
   X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
   X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
   X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
   X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
   X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
   X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),

   /* SRBM_STATUS2 */
   X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),

   /* CP_STAT */
   X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
   X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
   X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
   X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
   X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
   X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),

   X("pd-num-prims-accepted", PD_NUM_PRIMS_ACCEPTED, UINT64, AVERAGE),
   X("pd-num-prims-rejected", PD_NUM_PRIMS_REJECTED, UINT64, AVERAGE),
   X("pd-num-prims-ineligible", PD_NUM_PRIMS_INELIGIBLE, UINT64, AVERAGE),
};

static unsigned si_get_num_queries(struct si_screen *sscreen)
{
   /* amdgpu */
   if (sscreen->info.is_amdgpu) {
      if (sscreen->info.chip_class >= GFX8)
         return ARRAY_SIZE(si_driver_query_list);
      else
         return ARRAY_SIZE(si_driver_query_list) - 7;
   }

   /* radeon */
   if (sscreen->info.has_read_registers_query) {
      if (sscreen->info.chip_class == GFX7)
         return ARRAY_SIZE(si_driver_query_list) - 6;
      else
         return ARRAY_SIZE(si_driver_query_list) - 7;
   }

   return ARRAY_SIZE(si_driver_query_list) - 21;
}

static int si_get_driver_query_info(struct pipe_screen *screen,
                                    unsigned index,
                                    struct pipe_driver_query_info *info)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   unsigned num_queries = si_get_num_queries(sscreen);

   if (!info) {
      unsigned num_perfcounters =
         si_get_perfcounter_info(sscreen, 0, NULL);

      return num_queries + num_perfcounters;
   }

   if (index >= num_queries)
      return si_get_perfcounter_info(sscreen, index - num_queries, info);

   *info = si_driver_query_list[index];

   switch (info->query_type) {
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_MAPPED_VRAM:
      info->max_value.u64 = sscreen->info.vram_size;
      break;
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_MAPPED_GTT:
      info->max_value.u64 = sscreen->info.gart_size;
      break;
   case SI_QUERY_GPU_TEMPERATURE:
      info->max_value.u64 = 125;
      break;
   case SI_QUERY_VRAM_VIS_USAGE:
      info->max_value.u64 = sscreen->info.vram_vis_size;
      break;
   }

   if (info->group_id != ~(unsigned)0 && sscreen->perfcounters)
      info->group_id += sscreen->perfcounters->num_groups;

   return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * code.
 */
static int si_get_driver_query_group_info(struct pipe_screen *screen,
                                          unsigned index,
                                          struct pipe_driver_query_group_info *info)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   unsigned num_pc_groups = 0;

   if (sscreen->perfcounters)
      num_pc_groups = sscreen->perfcounters->num_groups;

   if (!info)
      return num_pc_groups + SI_NUM_SW_QUERY_GROUPS;

   if (index < num_pc_groups)
      return si_get_perfcounter_group_info(sscreen, index, info);

   index -= num_pc_groups;
   if (index >= SI_NUM_SW_QUERY_GROUPS)
      return 0;

   info->name = "GPIN";
   info->max_active_queries = 5;
   info->num_queries = 5;
   return 1;
}

void si_init_query_functions(struct si_context *sctx)
{
   sctx->b.create_query = si_create_query;
   sctx->b.create_batch_query = si_create_batch_query;
   sctx->b.destroy_query = si_destroy_query;
   sctx->b.begin_query = si_begin_query;
   sctx->b.end_query = si_end_query;
   sctx->b.get_query_result = si_get_query_result;
   sctx->b.get_query_result_resource = si_get_query_result_resource;

   if (sctx->has_graphics) {
      sctx->atoms.s.render_cond.emit = si_emit_query_predication;
      sctx->b.render_condition = si_render_condition;
   }

   list_inithead(&sctx->active_queries);
}

void si_init_screen_query_functions(struct si_screen *sscreen)
{
   sscreen->b.get_driver_query_info = si_get_driver_query_info;
   sscreen->b.get_driver_query_group_info = si_get_driver_query_group_info;
}