/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_query.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "util/u_suballoc.h"
#include "amd/common/sid.h"

#define SI_MAX_STREAMS 4
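
/* This file implements two kinds of queries:
 * - software queries, which snapshot driver/winsys counters on the CPU and
 *   need no GPU buffer handling or suspend/resume,
 * - hardware queries, whose results are written by the GPU into a chain of
 *   query buffers and read back (or resolved by a compute shader) later.
 */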
struct si_hw_query_params {
   unsigned start_offset;
   unsigned end_offset;
   unsigned fence_offset;
   unsigned pair_stride;
   unsigned pair_count;
};
/* Queries without buffer handling or suspend/resume. */
struct si_query_sw {
   struct si_query b;

   uint64_t begin_result;
   uint64_t end_result;

   uint64_t begin_time;
   uint64_t end_time;

   /* Fence for GPU_FINISHED. */
   struct pipe_fence_handle *fence;
};
static void si_query_sw_destroy(struct si_screen *sscreen,
                                struct si_query *rquery)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;

   sscreen->b.fence_reference(&sscreen->b, &query->fence, NULL);
   FREE(query);
}
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
   switch (type) {
   case SI_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
   case SI_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
   case SI_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
   case SI_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
   case SI_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
   case SI_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
   case SI_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
   case SI_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
   case SI_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
   case SI_QUERY_GFX_IB_SIZE: return RADEON_GFX_IB_SIZE_COUNTER;
   case SI_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
   case SI_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
   case SI_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
   case SI_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
   case SI_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
   case SI_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
   case SI_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
   case SI_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
   case SI_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
   default: unreachable("query type does not correspond to winsys id");
   }
}
static int64_t si_finish_dma_get_cpu_time(struct si_context *sctx)
{
   struct pipe_fence_handle *fence = NULL;

   si_flush_dma_cs(sctx, 0, &fence);
   if (fence) {
      sctx->ws->fence_wait(sctx->ws, fence, PIPE_TIMEOUT_INFINITE);
      sctx->ws->fence_reference(&fence, NULL);
   }

   return os_time_get_nano();
}
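
/* A software query simply snapshots a counter in begin() and again in end();
 * get_result() then reports end_result - begin_result. Counters owned by the
 * winsys are read via winsys_id_from_type() above.
 */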
static bool si_query_sw_begin(struct si_context *sctx,
                              struct si_query *rquery)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;
   enum radeon_value_id ws_id;

   switch(query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_GPU_FINISHED:
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA_SI:
      query->begin_result = si_finish_dma_get_cpu_time(sctx);
      break;
   case SI_QUERY_DRAW_CALLS:
      query->begin_result = sctx->num_draw_calls;
      break;
   case SI_QUERY_DECOMPRESS_CALLS:
      query->begin_result = sctx->num_decompress_calls;
      break;
   case SI_QUERY_MRT_DRAW_CALLS:
      query->begin_result = sctx->num_mrt_draw_calls;
      break;
   case SI_QUERY_PRIM_RESTART_CALLS:
      query->begin_result = sctx->num_prim_restart_calls;
      break;
   case SI_QUERY_SPILL_DRAW_CALLS:
      query->begin_result = sctx->num_spill_draw_calls;
      break;
   case SI_QUERY_COMPUTE_CALLS:
      query->begin_result = sctx->num_compute_calls;
      break;
   case SI_QUERY_SPILL_COMPUTE_CALLS:
      query->begin_result = sctx->num_spill_compute_calls;
      break;
   case SI_QUERY_DMA_CALLS:
      query->begin_result = sctx->num_dma_calls;
      break;
   case SI_QUERY_CP_DMA_CALLS:
      query->begin_result = sctx->num_cp_dma_calls;
      break;
   case SI_QUERY_NUM_VS_FLUSHES:
      query->begin_result = sctx->num_vs_flushes;
      break;
   case SI_QUERY_NUM_PS_FLUSHES:
      query->begin_result = sctx->num_ps_flushes;
      break;
   case SI_QUERY_NUM_CS_FLUSHES:
      query->begin_result = sctx->num_cs_flushes;
      break;
   case SI_QUERY_NUM_CB_CACHE_FLUSHES:
      query->begin_result = sctx->num_cb_cache_flushes;
      break;
   case SI_QUERY_NUM_DB_CACHE_FLUSHES:
      query->begin_result = sctx->num_db_cache_flushes;
      break;
   case SI_QUERY_NUM_L2_INVALIDATES:
      query->begin_result = sctx->num_L2_invalidates;
      break;
   case SI_QUERY_NUM_L2_WRITEBACKS:
      query->begin_result = sctx->num_L2_writebacks;
      break;
   case SI_QUERY_NUM_RESIDENT_HANDLES:
      query->begin_result = sctx->num_resident_handles;
      break;
   case SI_QUERY_TC_OFFLOADED_SLOTS:
      query->begin_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
      break;
   case SI_QUERY_TC_DIRECT_SLOTS:
      query->begin_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
      break;
   case SI_QUERY_TC_NUM_SYNCS:
      query->begin_result = sctx->tc ? sctx->tc->num_syncs : 0;
      break;
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_MAPPED_VRAM:
   case SI_QUERY_MAPPED_GTT:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_VRAM_VIS_USAGE:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_GPU_TEMPERATURE:
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
   case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
   case SI_QUERY_NUM_MAPPED_BUFFERS:
      query->begin_result = 0;
      break;
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GFX_IB_SIZE:
   case SI_QUERY_NUM_GFX_IBS:
   case SI_QUERY_NUM_SDMA_IBS:
   case SI_QUERY_NUM_BYTES_MOVED:
   case SI_QUERY_NUM_EVICTIONS:
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
      enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      break;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->begin_time = sctx->ws->query_value(sctx->ws,
                                                RADEON_NUM_GFX_IBS);
      break;
   case SI_QUERY_CS_THREAD_BUSY:
      ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->begin_time = os_time_get_nano();
      break;
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      query->begin_result =
         sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
      query->begin_time = os_time_get_nano();
      break;
   case SI_QUERY_GPU_LOAD:
   case SI_QUERY_GPU_SHADERS_BUSY:
   case SI_QUERY_GPU_TA_BUSY:
   case SI_QUERY_GPU_GDS_BUSY:
   case SI_QUERY_GPU_VGT_BUSY:
   case SI_QUERY_GPU_IA_BUSY:
   case SI_QUERY_GPU_SX_BUSY:
   case SI_QUERY_GPU_WD_BUSY:
   case SI_QUERY_GPU_BCI_BUSY:
   case SI_QUERY_GPU_SC_BUSY:
   case SI_QUERY_GPU_PA_BUSY:
   case SI_QUERY_GPU_DB_BUSY:
   case SI_QUERY_GPU_CP_BUSY:
   case SI_QUERY_GPU_CB_BUSY:
   case SI_QUERY_GPU_SDMA_BUSY:
   case SI_QUERY_GPU_PFP_BUSY:
   case SI_QUERY_GPU_MEQ_BUSY:
   case SI_QUERY_GPU_ME_BUSY:
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
   case SI_QUERY_GPU_CP_DMA_BUSY:
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      query->begin_result = si_begin_counter(sctx->screen,
                                             query->b.type);
      break;
   case SI_QUERY_NUM_COMPILATIONS:
      query->begin_result = p_atomic_read(&sctx->screen->num_compilations);
      break;
   case SI_QUERY_NUM_SHADERS_CREATED:
      query->begin_result = p_atomic_read(&sctx->screen->num_shaders_created);
      break;
   case SI_QUERY_NUM_SHADER_CACHE_HITS:
      query->begin_result =
         p_atomic_read(&sctx->screen->num_shader_cache_hits);
      break;
   case SI_QUERY_GPIN_ASIC_ID:
   case SI_QUERY_GPIN_NUM_SIMD:
   case SI_QUERY_GPIN_NUM_RB:
   case SI_QUERY_GPIN_NUM_SPI:
   case SI_QUERY_GPIN_NUM_SE:
      break;
   default:
      unreachable("si_query_sw_begin: bad query type");
   }

   return true;
}
static bool si_query_sw_end(struct si_context *sctx,
                            struct si_query *rquery)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;
   enum radeon_value_id ws_id;

   switch(query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      break;
   case PIPE_QUERY_GPU_FINISHED:
      sctx->b.flush(&sctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA_SI:
      query->end_result = si_finish_dma_get_cpu_time(sctx);
      break;
   case SI_QUERY_DRAW_CALLS:
      query->end_result = sctx->num_draw_calls;
      break;
   case SI_QUERY_DECOMPRESS_CALLS:
      query->end_result = sctx->num_decompress_calls;
      break;
   case SI_QUERY_MRT_DRAW_CALLS:
      query->end_result = sctx->num_mrt_draw_calls;
      break;
   case SI_QUERY_PRIM_RESTART_CALLS:
      query->end_result = sctx->num_prim_restart_calls;
      break;
   case SI_QUERY_SPILL_DRAW_CALLS:
      query->end_result = sctx->num_spill_draw_calls;
      break;
   case SI_QUERY_COMPUTE_CALLS:
      query->end_result = sctx->num_compute_calls;
      break;
   case SI_QUERY_SPILL_COMPUTE_CALLS:
      query->end_result = sctx->num_spill_compute_calls;
      break;
   case SI_QUERY_DMA_CALLS:
      query->end_result = sctx->num_dma_calls;
      break;
   case SI_QUERY_CP_DMA_CALLS:
      query->end_result = sctx->num_cp_dma_calls;
      break;
   case SI_QUERY_NUM_VS_FLUSHES:
      query->end_result = sctx->num_vs_flushes;
      break;
   case SI_QUERY_NUM_PS_FLUSHES:
      query->end_result = sctx->num_ps_flushes;
      break;
   case SI_QUERY_NUM_CS_FLUSHES:
      query->end_result = sctx->num_cs_flushes;
      break;
   case SI_QUERY_NUM_CB_CACHE_FLUSHES:
      query->end_result = sctx->num_cb_cache_flushes;
      break;
   case SI_QUERY_NUM_DB_CACHE_FLUSHES:
      query->end_result = sctx->num_db_cache_flushes;
      break;
   case SI_QUERY_NUM_L2_INVALIDATES:
      query->end_result = sctx->num_L2_invalidates;
      break;
   case SI_QUERY_NUM_L2_WRITEBACKS:
      query->end_result = sctx->num_L2_writebacks;
      break;
   case SI_QUERY_NUM_RESIDENT_HANDLES:
      query->end_result = sctx->num_resident_handles;
      break;
   case SI_QUERY_TC_OFFLOADED_SLOTS:
      query->end_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
      break;
   case SI_QUERY_TC_DIRECT_SLOTS:
      query->end_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
      break;
   case SI_QUERY_TC_NUM_SYNCS:
      query->end_result = sctx->tc ? sctx->tc->num_syncs : 0;
      break;
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_MAPPED_VRAM:
   case SI_QUERY_MAPPED_GTT:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_VRAM_VIS_USAGE:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_GPU_TEMPERATURE:
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GFX_IB_SIZE:
   case SI_QUERY_NUM_MAPPED_BUFFERS:
   case SI_QUERY_NUM_GFX_IBS:
   case SI_QUERY_NUM_SDMA_IBS:
   case SI_QUERY_NUM_BYTES_MOVED:
   case SI_QUERY_NUM_EVICTIONS:
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
      enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      break;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->end_time = sctx->ws->query_value(sctx->ws,
                                              RADEON_NUM_GFX_IBS);
      break;
   case SI_QUERY_CS_THREAD_BUSY:
      ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->end_time = os_time_get_nano();
      break;
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      query->end_result =
         sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
      query->end_time = os_time_get_nano();
      break;
   case SI_QUERY_GPU_LOAD:
   case SI_QUERY_GPU_SHADERS_BUSY:
   case SI_QUERY_GPU_TA_BUSY:
   case SI_QUERY_GPU_GDS_BUSY:
   case SI_QUERY_GPU_VGT_BUSY:
   case SI_QUERY_GPU_IA_BUSY:
   case SI_QUERY_GPU_SX_BUSY:
   case SI_QUERY_GPU_WD_BUSY:
   case SI_QUERY_GPU_BCI_BUSY:
   case SI_QUERY_GPU_SC_BUSY:
   case SI_QUERY_GPU_PA_BUSY:
   case SI_QUERY_GPU_DB_BUSY:
   case SI_QUERY_GPU_CP_BUSY:
   case SI_QUERY_GPU_CB_BUSY:
   case SI_QUERY_GPU_SDMA_BUSY:
   case SI_QUERY_GPU_PFP_BUSY:
   case SI_QUERY_GPU_MEQ_BUSY:
   case SI_QUERY_GPU_ME_BUSY:
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
   case SI_QUERY_GPU_CP_DMA_BUSY:
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      query->end_result = si_end_counter(sctx->screen,
                                         query->b.type,
                                         query->begin_result);
      query->begin_result = 0;
      break;
   case SI_QUERY_NUM_COMPILATIONS:
      query->end_result = p_atomic_read(&sctx->screen->num_compilations);
      break;
   case SI_QUERY_NUM_SHADERS_CREATED:
      query->end_result = p_atomic_read(&sctx->screen->num_shaders_created);
      break;
   case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
      query->end_result = sctx->last_tex_ps_draw_ratio;
      break;
   case SI_QUERY_NUM_SHADER_CACHE_HITS:
      query->end_result =
         p_atomic_read(&sctx->screen->num_shader_cache_hits);
      break;
   case SI_QUERY_GPIN_ASIC_ID:
   case SI_QUERY_GPIN_NUM_SIMD:
   case SI_QUERY_GPIN_NUM_RB:
   case SI_QUERY_GPIN_NUM_SPI:
   case SI_QUERY_GPIN_NUM_SE:
      break;
   default:
      unreachable("si_query_sw_end: bad query type");
   }

   return true;
}
static bool si_query_sw_get_result(struct si_context *sctx,
                                   struct si_query *rquery,
                                   bool wait,
                                   union pipe_query_result *result)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;

   switch (query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* Convert from cycles per millisecond to cycles per second (Hz). */
      result->timestamp_disjoint.frequency =
         (uint64_t)sctx->screen->info.clock_crystal_freq * 1000;
      result->timestamp_disjoint.disjoint = false;
      return true;
   case PIPE_QUERY_GPU_FINISHED: {
      struct pipe_screen *screen = sctx->b.screen;
      struct pipe_context *ctx = rquery->b.flushed ? NULL : &sctx->b;

      result->b = screen->fence_finish(screen, ctx, query->fence,
                                       wait ? PIPE_TIMEOUT_INFINITE : 0);
      return result->b;
   }

   case SI_QUERY_GFX_BO_LIST_SIZE:
      result->u64 = (query->end_result - query->begin_result) /
                    (query->end_time - query->begin_time);
      return true;
   case SI_QUERY_CS_THREAD_BUSY:
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      result->u64 = (query->end_result - query->begin_result) * 100 /
                    (query->end_time - query->begin_time);
      return true;
   case SI_QUERY_GPIN_ASIC_ID:
      result->u32 = 0;
      return true;
   case SI_QUERY_GPIN_NUM_SIMD:
      result->u32 = sctx->screen->info.num_good_compute_units;
      return true;
   case SI_QUERY_GPIN_NUM_RB:
      result->u32 = sctx->screen->info.num_render_backends;
      return true;
   case SI_QUERY_GPIN_NUM_SPI:
      result->u32 = 1; /* all supported chips have one SPI per SE */
      return true;
   case SI_QUERY_GPIN_NUM_SE:
      result->u32 = sctx->screen->info.max_se;
      return true;
   }

   result->u64 = query->end_result - query->begin_result;

   /* Convert the time to expected units. */
   switch (query->b.type) {
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GPU_TEMPERATURE:
      result->u64 /= 1000;
      break;
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
      result->u64 *= 1000000;
      break;
   }

   return true;
}
static struct si_query_ops sw_query_ops = {
   .destroy = si_query_sw_destroy,
   .begin = si_query_sw_begin,
   .end = si_query_sw_end,
   .get_result = si_query_sw_get_result,
   .get_result_resource = NULL
};
static struct pipe_query *si_query_sw_create(unsigned query_type)
{
   struct si_query_sw *query;

   query = CALLOC_STRUCT(si_query_sw);
   if (!query)
      return NULL;

   query->b.type = query_type;
   query->b.ops = &sw_query_ops;

   return (struct pipe_query *)query;
}
void si_query_hw_destroy(struct si_screen *sscreen,
                         struct si_query *rquery)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;
   struct si_query_buffer *prev = query->buffer.previous;

   /* Release all query buffers. */
   while (prev) {
      struct si_query_buffer *qbuf = prev;
      prev = prev->previous;
      r600_resource_reference(&qbuf->buf, NULL);
      FREE(qbuf);
   }

   r600_resource_reference(&query->buffer.buf, NULL);
   r600_resource_reference(&query->workaround_buf, NULL);
   FREE(rquery);
}
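
/* Query buffers are kept as a singly-linked list: when the current buffer
 * fills up in emit_start(), it is pushed onto buffer.previous and a fresh
 * buffer is allocated, so destroy() and reset() above/below must walk the
 * whole chain.
 */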
static struct r600_resource *si_new_query_buffer(struct si_screen *sscreen,
                                                 struct si_query_hw *query)
{
   unsigned buf_size = MAX2(query->result_size,
                            sscreen->info.min_alloc_size);

   /* Queries are normally read by the CPU after
    * being written by the GPU, hence staging is probably a good
    * usage pattern.
    */
   struct r600_resource *buf = r600_resource(
      pipe_buffer_create(&sscreen->b, 0,
                         PIPE_USAGE_STAGING, buf_size));
   if (!buf)
      return NULL;

   if (!query->ops->prepare_buffer(sscreen, query, buf)) {
      r600_resource_reference(&buf, NULL);
      return NULL;
   }

   return buf;
}
static bool si_query_hw_prepare_buffer(struct si_screen *sscreen,
                                       struct si_query_hw *query,
                                       struct r600_resource *buffer)
{
   /* Callers ensure that the buffer is currently unused by the GPU. */
   uint32_t *results = sscreen->ws->buffer_map(buffer->buf, NULL,
                                               PIPE_TRANSFER_WRITE |
                                               PIPE_TRANSFER_UNSYNCHRONIZED);
   if (!results)
      return false;

   memset(results, 0, buffer->b.b.width0);

   if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
      unsigned max_rbs = sscreen->info.num_render_backends;
      unsigned enabled_rb_mask = sscreen->info.enabled_rb_mask;
      unsigned num_results;
      unsigned i, j;

      /* Set top bits for unused backends. */
      num_results = buffer->b.b.width0 / query->result_size;
      for (j = 0; j < num_results; j++) {
         for (i = 0; i < max_rbs; i++) {
            if (!(enabled_rb_mask & (1<<i))) {
               results[(i * 4)+1] = 0x80000000;
               results[(i * 4)+3] = 0x80000000;
            }
         }
         results += 4 * max_rbs;
      }
   }

   return true;
}
static void si_query_hw_get_result_resource(struct si_context *sctx,
                                            struct si_query *rquery,
                                            bool wait,
                                            enum pipe_query_value_type result_type,
                                            int index,
                                            struct pipe_resource *resource,
                                            unsigned offset);

static struct si_query_ops query_hw_ops = {
   .destroy = si_query_hw_destroy,
   .begin = si_query_hw_begin,
   .end = si_query_hw_end,
   .get_result = si_query_hw_get_result,
   .get_result_resource = si_query_hw_get_result_resource,
};

static void si_query_hw_do_emit_start(struct si_context *sctx,
                                      struct si_query_hw *query,
                                      struct r600_resource *buffer,
                                      uint64_t va);
static void si_query_hw_do_emit_stop(struct si_context *sctx,
                                     struct si_query_hw *query,
                                     struct r600_resource *buffer,
                                     uint64_t va);
static void si_query_hw_add_result(struct si_screen *sscreen,
                                   struct si_query_hw *, void *buffer,
                                   union pipe_query_result *result);
static void si_query_hw_clear_result(struct si_query_hw *,
                                     union pipe_query_result *);

static struct si_query_hw_ops query_hw_default_hw_ops = {
   .prepare_buffer = si_query_hw_prepare_buffer,
   .emit_start = si_query_hw_do_emit_start,
   .emit_stop = si_query_hw_do_emit_stop,
   .clear_result = si_query_hw_clear_result,
   .add_result = si_query_hw_add_result,
};
bool si_query_hw_init(struct si_screen *sscreen,
                      struct si_query_hw *query)
{
   query->buffer.buf = si_new_query_buffer(sscreen, query);
   if (!query->buffer.buf)
      return false;

   return true;
}
static struct pipe_query *si_query_hw_create(struct si_screen *sscreen,
                                             unsigned query_type,
                                             unsigned index)
{
   struct si_query_hw *query = CALLOC_STRUCT(si_query_hw);
   if (!query)
      return NULL;

   query->b.type = query_type;
   query->b.ops = &query_hw_ops;
   query->ops = &query_hw_default_hw_ops;

   switch (query_type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      query->result_size = 16 * sscreen->info.num_render_backends;
      query->result_size += 16; /* for the fence + alignment */
      query->num_cs_dw_end = 6 + si_cp_write_fence_dwords(sscreen);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA:
      /* GET_GLOBAL_TIMESTAMP only works if the offset is a multiple of 32. */
      query->result_size = 64;
      query->num_cs_dw_end = 0;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      query->result_size = 24;
      query->num_cs_dw_end = 8 + si_cp_write_fence_dwords(sscreen);
      break;
   case PIPE_QUERY_TIMESTAMP:
      query->result_size = 16;
      query->num_cs_dw_end = 8 + si_cp_write_fence_dwords(sscreen);
      query->flags = SI_QUERY_HW_FLAG_NO_START;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
      query->result_size = 32;
      query->num_cs_dw_end = 6;
      query->stream = index;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
      query->result_size = 32 * SI_MAX_STREAMS;
      query->num_cs_dw_end = 6 * SI_MAX_STREAMS;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* 11 values on GCN. */
      query->result_size = 11 * 16;
      query->result_size += 8; /* for the fence + alignment */
      query->num_cs_dw_end = 6 + si_cp_write_fence_dwords(sscreen);
      break;
   default:
      assert(0);
      FREE(query);
      return NULL;
   }

   if (!si_query_hw_init(sscreen, query)) {
      FREE(query);
      return NULL;
   }

   return (struct pipe_query *)query;
}
static void si_update_occlusion_query_state(struct si_context *sctx,
                                            unsigned type, int diff)
{
   if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
       type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
      bool old_enable = sctx->num_occlusion_queries != 0;
      bool old_perfect_enable =
         sctx->num_perfect_occlusion_queries != 0;
      bool enable, perfect_enable;

      sctx->num_occlusion_queries += diff;
      assert(sctx->num_occlusion_queries >= 0);

      if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
         sctx->num_perfect_occlusion_queries += diff;
         assert(sctx->num_perfect_occlusion_queries >= 0);
      }

      enable = sctx->num_occlusion_queries != 0;
      perfect_enable = sctx->num_perfect_occlusion_queries != 0;

      if (enable != old_enable || perfect_enable != old_perfect_enable) {
         si_set_occlusion_query_state(sctx, old_perfect_enable);
      }
   }
}
static unsigned event_type_for_stream(unsigned stream)
{
   switch (stream) {
   default:
   case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
   case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
   case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
   case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
   }
}
static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
                                  unsigned stream)
{
   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
   radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
   radeon_emit(cs, va);
   radeon_emit(cs, va >> 32);
}
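
/* Streamout-based queries sample the STREAMOUTSTATS* event for one stream;
 * the CP then writes that stream's statistics (NumPrimitivesWritten,
 * PrimitiveStorageNeeded) to the given VA.
 */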
static void si_query_hw_do_emit_start(struct si_context *sctx,
                                      struct si_query_hw *query,
                                      struct r600_resource *buffer,
                                      uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   switch (query->b.type) {
   case SI_QUERY_TIME_ELAPSED_SDMA:
      si_dma_emit_timestamp(sctx, buffer, va - buffer->gpu_address);
      return;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      emit_sample_streamout(cs, va, query->stream);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
         emit_sample_streamout(cs, va + 32 * stream, stream);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      /* Write the timestamp from the CP not waiting for
       * outstanding draws (top-of-pipe).
       */
      radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
      radeon_emit(cs, COPY_DATA_COUNT_SEL |
                      COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
                      COPY_DATA_DST_SEL(COPY_DATA_DST_MEM));
      radeon_emit(cs, 0);
      radeon_emit(cs, 0);
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   default:
      assert(0);
   }
   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
                             RADEON_PRIO_QUERY);
}
static void si_query_hw_emit_start(struct si_context *sctx,
                                   struct si_query_hw *query)
{
   uint64_t va;

   if (!query->buffer.buf)
      return; // previous buffer allocation failure

   si_update_occlusion_query_state(sctx, query->b.type, 1);
   si_update_prims_generated_query_state(sctx, query->b.type, 1);

   if (query->b.type != SI_QUERY_TIME_ELAPSED_SDMA)
      si_need_gfx_cs_space(sctx);

   /* Get a new query buffer if needed. */
   if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
      struct si_query_buffer *qbuf = MALLOC_STRUCT(si_query_buffer);
      *qbuf = query->buffer;
      query->buffer.results_end = 0;
      query->buffer.previous = qbuf;
      query->buffer.buf = si_new_query_buffer(sctx->screen, query);
      if (!query->buffer.buf)
         return;
   }

   /* emit begin query */
   va = query->buffer.buf->gpu_address + query->buffer.results_end;

   query->ops->emit_start(sctx, query, query->buffer.buf, va);

   sctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
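
/* The stop packets below write the second value of each begin/end pair and,
 * for most query types, a 32-bit fence (0x80000000) via RELEASE_MEM, so that
 * result readiness can be tested or waited on later.
 */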
static void si_query_hw_do_emit_stop(struct si_context *sctx,
                                     struct si_query_hw *query,
                                     struct r600_resource *buffer,
                                     uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   uint64_t fence_va = 0;

   switch (query->b.type) {
   case SI_QUERY_TIME_ELAPSED_SDMA:
      si_dma_emit_timestamp(sctx, buffer, va + 32 - buffer->gpu_address);
      return;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      va += 8;
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      fence_va = va + sctx->screen->info.num_render_backends * 16 - 8;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      va += 16;
      emit_sample_streamout(cs, va, query->stream);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      va += 16;
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
         emit_sample_streamout(cs, va + 32 * stream, stream);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      va += 8;
      /* fall through */
   case PIPE_QUERY_TIMESTAMP:
      si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS,
                        0, EOP_DST_SEL_MEM,
                        EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
                        EOP_DATA_SEL_TIMESTAMP, NULL, va,
                        0, query->b.type);
      fence_va = va + 8;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS: {
      unsigned sample_size = (query->result_size - 8) / 2;

      va += sample_size;
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      fence_va = va + sample_size;
      break;
   }
   default:
      assert(0);
   }
   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
                             RADEON_PRIO_QUERY);

   if (fence_va) {
      si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                        EOP_DST_SEL_MEM,
                        EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
                        EOP_DATA_SEL_VALUE_32BIT,
                        query->buffer.buf, fence_va, 0x80000000,
                        query->b.type);
   }
}
static void si_query_hw_emit_stop(struct si_context *sctx,
                                  struct si_query_hw *query)
{
   uint64_t va;

   if (!query->buffer.buf)
      return; // previous buffer allocation failure

   /* The queries which need begin already called this in begin_query. */
   if (query->flags & SI_QUERY_HW_FLAG_NO_START)
      si_need_gfx_cs_space(sctx);

   /* emit end query */
   va = query->buffer.buf->gpu_address + query->buffer.results_end;

   query->ops->emit_stop(sctx, query, query->buffer.buf, va);

   query->buffer.results_end += query->result_size;

   if (!(query->flags & SI_QUERY_HW_FLAG_NO_START))
      sctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

   si_update_occlusion_query_state(sctx, query->b.type, -1);
   si_update_prims_generated_query_state(sctx, query->b.type, -1);
}
static void emit_set_predicate(struct si_context *ctx,
                               struct r600_resource *buf, uint64_t va,
                               uint32_t op)
{
   struct radeon_cmdbuf *cs = ctx->gfx_cs;

   if (ctx->chip_class >= GFX9) {
      radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
      radeon_emit(cs, op);
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
   } else {
      radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
      radeon_emit(cs, va);
      radeon_emit(cs, op | ((va >> 32) & 0xFF));
   }
   radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
                             RADEON_PRIO_QUERY);
}
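
/* Render condition: program CP predication from the query result. Note the
 * packet layout difference handled above: GFX9 takes (op, va_lo, va_hi),
 * while older chips take (va_lo, op | va_hi).
 */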
static void si_emit_query_predication(struct si_context *ctx)
{
   struct si_query_hw *query = (struct si_query_hw *)ctx->render_cond;
   struct si_query_buffer *qbuf;
   uint32_t op;
   bool flag_wait, invert;

   if (!query)
      return;

   invert = ctx->render_cond_invert;
   flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
               ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

   if (query->workaround_buf) {
      op = PRED_OP(PREDICATION_OP_BOOL64);
   } else {
      switch (query->b.type) {
      case PIPE_QUERY_OCCLUSION_COUNTER:
      case PIPE_QUERY_OCCLUSION_PREDICATE:
      case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
         op = PRED_OP(PREDICATION_OP_ZPASS);
         break;
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
         op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
         break;
      default:
         assert(false);
         return;
      }
   }

   /* if true then invert, see GL_ARB_conditional_render_inverted */
   if (invert)
      op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
   else
      op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

   /* Use the value written by compute shader as a workaround. Note that
    * the wait flag does not apply in this predication mode.
    *
    * The shader outputs the result value to L2. Workarounds only affect VI
    * and later, where the CP reads data from L2, so we don't need an
    * additional flush.
    */
   if (query->workaround_buf) {
      uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
      emit_set_predicate(ctx, query->workaround_buf, va, op);
      return;
   }

   op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

   /* emit predicate packets for all data blocks */
   for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned results_base = 0;
      uint64_t va_base = qbuf->buf->gpu_address;

      while (results_base < qbuf->results_end) {
         uint64_t va = va_base + results_base;

         if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
            for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
               emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

               /* set CONTINUE bit for all packets except the first */
               op |= PREDICATION_CONTINUE;
            }
         } else {
            emit_set_predicate(ctx, qbuf->buf, va, op);
            op |= PREDICATION_CONTINUE;
         }

         results_base += query->result_size;
      }
   }
}
static struct pipe_query *si_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
   struct si_screen *sscreen =
      (struct si_screen *)ctx->screen;

   if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
       query_type == PIPE_QUERY_GPU_FINISHED ||
       (query_type >= PIPE_QUERY_DRIVER_SPECIFIC &&
        query_type != SI_QUERY_TIME_ELAPSED_SDMA))
      return si_query_sw_create(query_type);

   return si_query_hw_create(sscreen, query_type, index);
}
static void si_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   rquery->ops->destroy(sctx->screen, rquery);
}
static boolean si_begin_query(struct pipe_context *ctx,
                              struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   return rquery->ops->begin(sctx, rquery);
}
void si_query_hw_reset_buffers(struct si_context *sctx,
                               struct si_query_hw *query)
{
   struct si_query_buffer *prev = query->buffer.previous;

   /* Discard the old query buffers. */
   while (prev) {
      struct si_query_buffer *qbuf = prev;
      prev = prev->previous;
      r600_resource_reference(&qbuf->buf, NULL);
      FREE(qbuf);
   }

   query->buffer.results_end = 0;
   query->buffer.previous = NULL;

   /* Obtain a new buffer if the current one can't be mapped without a stall. */
   if (si_rings_is_buffer_referenced(sctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
       !sctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
      r600_resource_reference(&query->buffer.buf, NULL);
      query->buffer.buf = si_new_query_buffer(sctx->screen, query);
   } else {
      if (!query->ops->prepare_buffer(sctx->screen, query, query->buffer.buf))
         r600_resource_reference(&query->buffer.buf, NULL);
   }
}
bool si_query_hw_begin(struct si_context *sctx,
                       struct si_query *rquery)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;

   if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
      assert(0);
      return false;
   }

   if (!(query->flags & SI_QUERY_HW_FLAG_BEGIN_RESUMES))
      si_query_hw_reset_buffers(sctx, query);

   r600_resource_reference(&query->workaround_buf, NULL);

   si_query_hw_emit_start(sctx, query);
   if (!query->buffer.buf)
      return false;

   LIST_ADDTAIL(&query->list, &sctx->active_queries);
   return true;
}
static bool si_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   return rquery->ops->end(sctx, rquery);
}
bool si_query_hw_end(struct si_context *sctx,
                     struct si_query *rquery)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;

   if (query->flags & SI_QUERY_HW_FLAG_NO_START)
      si_query_hw_reset_buffers(sctx, query);

   si_query_hw_emit_stop(sctx, query);

   if (!(query->flags & SI_QUERY_HW_FLAG_NO_START))
      LIST_DELINIT(&query->list);

   if (!query->buffer.buf)
      return false;

   return true;
}
static void si_get_hw_query_params(struct si_context *sctx,
                                   struct si_query_hw *rquery, int index,
                                   struct si_hw_query_params *params)
{
   unsigned max_rbs = sctx->screen->info.num_render_backends;

   params->pair_stride = 0;
   params->pair_count = 1;

   switch (rquery->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      params->start_offset = 0;
      params->end_offset = 8;
      params->fence_offset = max_rbs * 16;
      params->pair_stride = 16;
      params->pair_count = max_rbs;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      params->start_offset = 0;
      params->end_offset = 8;
      params->fence_offset = 16;
      break;
   case PIPE_QUERY_TIMESTAMP:
      params->start_offset = 0;
      params->end_offset = 0;
      params->fence_offset = 8;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      params->start_offset = 8;
      params->end_offset = 24;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      params->start_offset = 0;
      params->end_offset = 16;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      params->start_offset = 8 - index * 8;
      params->end_offset = 24 - index * 8;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      params->pair_count = SI_MAX_STREAMS;
      params->pair_stride = 32;
      /* fall through */
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      params->start_offset = 0;
      params->end_offset = 16;

      /* We can re-use the high dword of the last 64-bit value as a
       * fence: it is initialized as 0, and the high bit is set by
       * the write of the streamout stats event.
       */
      params->fence_offset = rquery->result_size - 4;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
   {
      static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
      params->start_offset = offsets[index];
      params->end_offset = 88 + offsets[index];
      params->fence_offset = 2 * 88;
      break;
   }
   default:
      unreachable("si_get_hw_query_params unsupported");
   }
}
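
/* Results are stored as begin/end pairs of 64-bit values. The helper below
 * returns end - start, optionally requiring bit 63 of both values to be set,
 * which marks the values as actually written by the GPU (unused RBs get this
 * bit pre-set in prepare_buffer).
 */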
static unsigned si_query_read_result(void *map, unsigned start_index, unsigned end_index,
                                     bool test_status_bit)
{
   uint32_t *current_result = (uint32_t*)map;
   uint64_t start, end;

   start = (uint64_t)current_result[start_index] |
           (uint64_t)current_result[start_index+1] << 32;
   end = (uint64_t)current_result[end_index] |
         (uint64_t)current_result[end_index+1] << 32;

   if (!test_status_bit ||
       ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
      return end - start;
   }
   return 0;
}
static void si_query_hw_add_result(struct si_screen *sscreen,
                                   struct si_query_hw *query,
                                   void *buffer,
                                   union pipe_query_result *result)
{
   unsigned max_rbs = sscreen->info.num_render_backends;

   switch (query->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER: {
      for (unsigned i = 0; i < max_rbs; ++i) {
         unsigned results_base = i * 16;
         result->u64 +=
            si_query_read_result(buffer + results_base, 0, 2, true);
      }
      break;
   }
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
      for (unsigned i = 0; i < max_rbs; ++i) {
         unsigned results_base = i * 16;
         result->b = result->b ||
            si_query_read_result(buffer + results_base, 0, 2, true) != 0;
      }
      break;
   }
   case PIPE_QUERY_TIME_ELAPSED:
      result->u64 += si_query_read_result(buffer, 0, 2, false);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA:
      result->u64 += si_query_read_result(buffer, 0, 32/4, false);
      break;
   case PIPE_QUERY_TIMESTAMP:
      result->u64 = *(uint64_t*)buffer;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      /* SAMPLE_STREAMOUTSTATS stores this structure:
       * {
       *    u64 NumPrimitivesWritten;
       *    u64 PrimitiveStorageNeeded;
       * }
       * We only need NumPrimitivesWritten here. */
      result->u64 += si_query_read_result(buffer, 2, 6, true);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      /* Here we read PrimitiveStorageNeeded. */
      result->u64 += si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      result->so_statistics.num_primitives_written +=
         si_query_read_result(buffer, 2, 6, true);
      result->so_statistics.primitives_storage_needed +=
         si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      result->b = result->b ||
         si_query_read_result(buffer, 2, 6, true) !=
         si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
         result->b = result->b ||
            si_query_read_result(buffer, 2, 6, true) !=
            si_query_read_result(buffer, 0, 4, true);
         buffer = (char *)buffer + 32;
      }
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      result->pipeline_statistics.ps_invocations +=
         si_query_read_result(buffer, 0, 22, false);
      result->pipeline_statistics.c_primitives +=
         si_query_read_result(buffer, 2, 24, false);
      result->pipeline_statistics.c_invocations +=
         si_query_read_result(buffer, 4, 26, false);
      result->pipeline_statistics.vs_invocations +=
         si_query_read_result(buffer, 6, 28, false);
      result->pipeline_statistics.gs_invocations +=
         si_query_read_result(buffer, 8, 30, false);
      result->pipeline_statistics.gs_primitives +=
         si_query_read_result(buffer, 10, 32, false);
      result->pipeline_statistics.ia_primitives +=
         si_query_read_result(buffer, 12, 34, false);
      result->pipeline_statistics.ia_vertices +=
         si_query_read_result(buffer, 14, 36, false);
      result->pipeline_statistics.hs_invocations +=
         si_query_read_result(buffer, 16, 38, false);
      result->pipeline_statistics.ds_invocations +=
         si_query_read_result(buffer, 18, 40, false);
      result->pipeline_statistics.cs_invocations +=
         si_query_read_result(buffer, 20, 42, false);
#if 0 /* for testing */
      printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
             "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
             "Clipper prims=%llu, PS=%llu, CS=%llu\n",
             result->pipeline_statistics.ia_vertices,
             result->pipeline_statistics.ia_primitives,
             result->pipeline_statistics.vs_invocations,
             result->pipeline_statistics.hs_invocations,
             result->pipeline_statistics.ds_invocations,
             result->pipeline_statistics.gs_invocations,
             result->pipeline_statistics.gs_primitives,
             result->pipeline_statistics.c_invocations,
             result->pipeline_statistics.c_primitives,
             result->pipeline_statistics.ps_invocations,
             result->pipeline_statistics.cs_invocations);
#endif
      break;
   default:
      assert(0);
   }
}
static boolean si_get_query_result(struct pipe_context *ctx,
                                   struct pipe_query *query, boolean wait,
                                   union pipe_query_result *result)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   return rquery->ops->get_result(sctx, rquery, wait, result);
}
static void si_get_query_result_resource(struct pipe_context *ctx,
                                         struct pipe_query *query,
                                         boolean wait,
                                         enum pipe_query_value_type result_type,
                                         int index,
                                         struct pipe_resource *resource,
                                         unsigned offset)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   rquery->ops->get_result_resource(sctx, rquery, wait, result_type, index,
                                    resource, offset);
}
static void si_query_hw_clear_result(struct si_query_hw *query,
                                     union pipe_query_result *result)
{
   util_query_clear_result(result, query->b.type);
}
bool si_query_hw_get_result(struct si_context *sctx,
                            struct si_query *rquery,
                            bool wait, union pipe_query_result *result)
{
   struct si_screen *sscreen = sctx->screen;
   struct si_query_hw *query = (struct si_query_hw *)rquery;
   struct si_query_buffer *qbuf;

   query->ops->clear_result(query, result);

   for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned usage = PIPE_TRANSFER_READ |
                       (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
      unsigned results_base = 0;
      void *map;

      if (rquery->b.flushed)
         map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
      else
         map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);

      if (!map)
         return false;

      while (results_base != qbuf->results_end) {
         query->ops->add_result(sscreen, query, map + results_base,
                                result);
         results_base += query->result_size;
      }
   }

   /* Convert the time to expected units. */
   if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
       rquery->type == SI_QUERY_TIME_ELAPSED_SDMA ||
       rquery->type == PIPE_QUERY_TIMESTAMP) {
      result->u64 = (1000000 * result->u64) / sscreen->info.clock_crystal_freq;
   }
   return true;
}
static void si_restore_qbo_state(struct si_context *sctx,
                                 struct si_qbo_state *st)
{
   sctx->b.bind_compute_state(&sctx->b, st->saved_compute);

   sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
   pipe_resource_reference(&st->saved_const0.buffer, NULL);

   sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
   for (unsigned i = 0; i < 3; ++i)
      pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
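
/* GPU-side result resolve: instead of mapping the buffers on the CPU, a
 * compute shader (query_result_shader) accumulates all begin/end pairs and
 * writes the final value to the destination resource. The consts.config bits
 * set below select the query flavor and result width for that shader.
 */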
static void si_query_hw_get_result_resource(struct si_context *sctx,
                                            struct si_query *rquery,
                                            bool wait,
                                            enum pipe_query_value_type result_type,
                                            int index,
                                            struct pipe_resource *resource,
                                            unsigned offset)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;
   struct si_query_buffer *qbuf;
   struct si_query_buffer *qbuf_prev;
   struct pipe_resource *tmp_buffer = NULL;
   unsigned tmp_buffer_offset = 0;
   struct si_qbo_state saved_state = {};
   struct pipe_grid_info grid = {};
   struct pipe_constant_buffer constant_buffer = {};
   struct pipe_shader_buffer ssbo[3];
   struct si_hw_query_params params;
   struct {
      uint32_t end_offset;
      uint32_t result_stride;
      uint32_t result_count;
      uint32_t config;
      uint32_t fence_offset;
      uint32_t pair_stride;
      uint32_t pair_count;
   } consts;

   if (!sctx->query_result_shader) {
      sctx->query_result_shader = si_create_query_result_cs(sctx);
      if (!sctx->query_result_shader)
         return;
   }

   if (query->buffer.previous) {
      u_suballocator_alloc(sctx->allocator_zeroed_memory, 16, 16,
                           &tmp_buffer_offset, &tmp_buffer);
      if (!tmp_buffer)
         return;
   }

   si_save_qbo_state(sctx, &saved_state);

   si_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
   consts.end_offset = params.end_offset - params.start_offset;
   consts.fence_offset = params.fence_offset - params.start_offset;
   consts.result_stride = query->result_size;
   consts.pair_stride = params.pair_stride;
   consts.pair_count = params.pair_count;

   constant_buffer.buffer_size = sizeof(consts);
   constant_buffer.user_buffer = &consts;

   ssbo[1].buffer = tmp_buffer;
   ssbo[1].buffer_offset = tmp_buffer_offset;
   ssbo[1].buffer_size = 16;

   ssbo[2] = ssbo[1];

   sctx->b.bind_compute_state(&sctx->b, sctx->query_result_shader);

   grid.block[0] = 1;
   grid.block[1] = 1;
   grid.block[2] = 1;
   grid.grid[0] = 1;
   grid.grid[1] = 1;
   grid.grid[2] = 1;

   consts.config = 0;
   if (index < 0)
      consts.config |= 4;
   if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
      consts.config |= 7;
   else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
            query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
      consts.config |= 8 | 256;
   else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
            query->b.type == PIPE_QUERY_TIME_ELAPSED)
      consts.config |= 32;

   switch (result_type) {
   case PIPE_QUERY_TYPE_U64:
   case PIPE_QUERY_TYPE_I64:
      consts.config |= 64;
      break;
   case PIPE_QUERY_TYPE_I32:
      consts.config |= 128;
      break;
   case PIPE_QUERY_TYPE_U32:
      break;
   }

   sctx->flags |= sctx->screen->barrier_flags.cp_to_L2;

   for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
      if (query->b.type != PIPE_QUERY_TIMESTAMP) {
         qbuf_prev = qbuf->previous;
         consts.result_count = qbuf->results_end / query->result_size;
         consts.config &= ~3;
         if (qbuf != &query->buffer)
            consts.config |= 1;
      } else {
         /* Only read the last timestamp. */
         qbuf_prev = NULL;
         consts.result_count = 0;
         consts.config |= 16;
         params.start_offset += qbuf->results_end - query->result_size;
      }

      sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

      ssbo[0].buffer = &qbuf->buf->b.b;
      ssbo[0].buffer_offset = params.start_offset;
      ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

      if (!qbuf->previous) {
         ssbo[2].buffer = resource;
         ssbo[2].buffer_offset = offset;
         ssbo[2].buffer_size = 8;

         r600_resource(resource)->TC_L2_dirty = true;
      }

      sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

      if (wait && qbuf == &query->buffer) {
         uint64_t va;

         /* Wait for result availability. Wait only for readiness
          * of the last entry, since the fence writes should be
          * serialized in the CP.
          */
         va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
         va += params.fence_offset;

         si_cp_wait_mem(sctx, va, 0x80000000, 0x80000000, 0);
      }

      sctx->b.launch_grid(&sctx->b, &grid);
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   si_restore_qbo_state(sctx, &saved_state);
   pipe_resource_reference(&tmp_buffer, NULL);
}
static void si_render_condition(struct pipe_context *ctx,
                                struct pipe_query *query,
                                boolean condition,
                                enum pipe_render_cond_flag mode)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query_hw *rquery = (struct si_query_hw *)query;
   struct si_atom *atom = &sctx->atoms.s.render_cond;

   if (query) {
      bool needs_workaround = false;

      /* There was a firmware regression in VI which causes successive
       * SET_PREDICATION packets to give the wrong answer for
       * non-inverted stream overflow predication.
       */
      if (((sctx->chip_class == VI && sctx->screen->info.pfp_fw_feature < 49) ||
           (sctx->chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
          !condition &&
          (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
           (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
            (rquery->buffer.previous ||
             rquery->buffer.results_end > rquery->result_size)))) {
         needs_workaround = true;
      }

      if (needs_workaround && !rquery->workaround_buf) {
         bool old_force_off = sctx->render_cond_force_off;
         sctx->render_cond_force_off = true;

         u_suballocator_alloc(
            sctx->allocator_zeroed_memory, 8, 8,
            &rquery->workaround_offset,
            (struct pipe_resource **)&rquery->workaround_buf);

         /* Reset to NULL to avoid a redundant SET_PREDICATION
          * from launching the compute grid.
          */
         sctx->render_cond = NULL;

         ctx->get_query_result_resource(
            ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
            &rquery->workaround_buf->b.b, rquery->workaround_offset);

         /* Setting this in the render cond atom is too late,
          * so set it here. */
         sctx->flags |= sctx->screen->barrier_flags.L2_to_cp |
                        SI_CONTEXT_FLUSH_FOR_RENDER_COND;

         sctx->render_cond_force_off = old_force_off;
      }
   }

   sctx->render_cond = query;
   sctx->render_cond_invert = condition;
   sctx->render_cond_mode = mode;

   si_set_atom_dirty(sctx, atom, query != NULL);
}
void si_suspend_queries(struct si_context *sctx)
{
   struct si_query_hw *query;

   LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, list) {
      si_query_hw_emit_stop(sctx, query);
   }
   assert(sctx->num_cs_dw_queries_suspend == 0);
}
void si_resume_queries(struct si_context *sctx)
{
   struct si_query_hw *query;

   assert(sctx->num_cs_dw_queries_suspend == 0);

   /* Check CS space here. Resuming must not be interrupted by flushes. */
   si_need_gfx_cs_space(sctx);

   LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, list) {
      si_query_hw_emit_start(sctx, query);
   }
}
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
   { \
      .name = name_, \
      .query_type = SI_QUERY_##query_type_, \
      .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
      .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
      .group_id = group_id_ \
   }

#define X(name_, query_type_, type_, result_type_) \
   XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
   XFULL(name_, query_type_, type_, result_type_, SI_QUERY_GROUP_##group_)
static struct pipe_driver_query_info si_driver_query_list[] = {
   X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
   X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
   X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
   X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
   X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
   X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
   X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
   X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
   X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
   X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
   X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
   X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
   X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
   X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
   X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
   X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
   X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
   X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
   X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
   X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
   X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
   X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
   X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
   X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
   X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
   X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
   X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
   X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
   X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
   X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
   X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
   X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
   X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
   X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
   X("GFX-IB-size", GFX_IB_SIZE, UINT64, AVERAGE),
   X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
   X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
   X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
   X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
   X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
   X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
   X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

   /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
    * which use it as a fallback path to detect the GPU type.
    *
    * Note: The names of these queries are significant for GPUPerfStudio
    * (and possibly their order as well). */
   XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
   XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
   XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
   XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
   XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

   X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
   X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
   X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

   /* The following queries must be at the end of the list because their
    * availability is adjusted dynamically based on the DRM version. */
   X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
   X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
   X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
   X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
   X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
   X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
   X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
   X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
   X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
   X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
   X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
   X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
   X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
   X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),

   /* SRBM_STATUS2 */
   X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),

   /* CP_STAT */
   X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
   X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
   X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
   X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
   X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
   X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL
static unsigned si_get_num_queries(struct si_screen *sscreen)
{
   /* amdgpu */
   if (sscreen->info.drm_major == 3) {
      if (sscreen->info.chip_class >= VI)
         return ARRAY_SIZE(si_driver_query_list);
      else
         return ARRAY_SIZE(si_driver_query_list) - 7;
   }

   /* radeon */
   if (sscreen->info.has_read_registers_query) {
      if (sscreen->info.chip_class == CIK)
         return ARRAY_SIZE(si_driver_query_list) - 6;
      else
         return ARRAY_SIZE(si_driver_query_list) - 7;
   }

   return ARRAY_SIZE(si_driver_query_list) - 21;
}
static int si_get_driver_query_info(struct pipe_screen *screen,
                                    unsigned index,
                                    struct pipe_driver_query_info *info)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   unsigned num_queries = si_get_num_queries(sscreen);

   if (!info) {
      unsigned num_perfcounters =
         si_get_perfcounter_info(sscreen, 0, NULL);

      return num_queries + num_perfcounters;
   }

   if (index >= num_queries)
      return si_get_perfcounter_info(sscreen, index - num_queries, info);

   *info = si_driver_query_list[index];

   switch (info->query_type) {
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_MAPPED_VRAM:
      info->max_value.u64 = sscreen->info.vram_size;
      break;
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_MAPPED_GTT:
      info->max_value.u64 = sscreen->info.gart_size;
      break;
   case SI_QUERY_GPU_TEMPERATURE:
      info->max_value.u64 = 125;
      break;
   case SI_QUERY_VRAM_VIS_USAGE:
      info->max_value.u64 = sscreen->info.vram_vis_size;
      break;
   }

   if (info->group_id != ~(unsigned)0 && sscreen->perfcounters)
      info->group_id += sscreen->perfcounters->num_groups;

   return 1;
}
/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int si_get_driver_query_group_info(struct pipe_screen *screen,
                                          unsigned index,
                                          struct pipe_driver_query_group_info *info)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   unsigned num_pc_groups = 0;

   if (sscreen->perfcounters)
      num_pc_groups = sscreen->perfcounters->num_groups;

   if (!info)
      return num_pc_groups + SI_NUM_SW_QUERY_GROUPS;

   if (index < num_pc_groups)
      return si_get_perfcounter_group_info(sscreen, index, info);

   index -= num_pc_groups;
   if (index >= SI_NUM_SW_QUERY_GROUPS)
      return 0;

   info->name = "GPIN";
   info->max_active_queries = 5;
   info->num_queries = 5;
   return 1;
}
void si_init_query_functions(struct si_context *sctx)
{
   sctx->b.create_query = si_create_query;
   sctx->b.create_batch_query = si_create_batch_query;
   sctx->b.destroy_query = si_destroy_query;
   sctx->b.begin_query = si_begin_query;
   sctx->b.end_query = si_end_query;
   sctx->b.get_query_result = si_get_query_result;
   sctx->b.get_query_result_resource = si_get_query_result_resource;
   sctx->atoms.s.render_cond.emit = si_emit_query_predication;

   if (((struct si_screen*)sctx->b.screen)->info.num_render_backends > 0)
      sctx->b.render_condition = si_render_condition;

   LIST_INITHEAD(&sctx->active_queries);
}
void si_init_screen_query_functions(struct si_screen *sscreen)
{
   sscreen->b.get_driver_query_info = si_get_driver_query_info;
   sscreen->b.get_driver_query_group_info = si_get_driver_query_group_info;
}