/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_query.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "util/u_suballoc.h"
#include "amd/common/sid.h"

#define SI_MAX_STREAMS 4

static const struct si_query_ops query_hw_ops;

struct si_hw_query_params {
   unsigned start_offset;
   unsigned end_offset;
   unsigned fence_offset;
   unsigned pair_stride;
   unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct si_query_sw {
   struct si_query b;

   uint64_t begin_result;
   uint64_t end_result;

   uint64_t begin_time;
   uint64_t end_time;

   /* Fence for GPU_FINISHED. */
   struct pipe_fence_handle *fence;
};

static void si_query_sw_destroy(struct si_screen *sscreen,
                                struct si_query *rquery)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;

   sscreen->b.fence_reference(&sscreen->b, &query->fence, NULL);
   FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
   switch (type) {
   case SI_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
   case SI_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
   case SI_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
   case SI_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
   case SI_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
   case SI_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
   case SI_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
   case SI_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
   case SI_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
   case SI_QUERY_GFX_IB_SIZE: return RADEON_GFX_IB_SIZE_COUNTER;
   case SI_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
   case SI_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
   case SI_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
   case SI_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
   case SI_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
   case SI_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
   case SI_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
   case SI_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
   case SI_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
   default: unreachable("query type does not correspond to winsys id");
   }
}

static int64_t si_finish_dma_get_cpu_time(struct si_context *sctx)
{
   struct pipe_fence_handle *fence = NULL;

   si_flush_dma_cs(sctx, 0, &fence);
   if (fence) {
      sctx->ws->fence_wait(sctx->ws, fence, PIPE_TIMEOUT_INFINITE);
      sctx->ws->fence_reference(&fence, NULL);
   }

   return os_time_get_nano();
}

static bool si_query_sw_begin(struct si_context *sctx,
                              struct si_query *rquery)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;
   enum radeon_value_id ws_id;

   switch (query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_GPU_FINISHED:
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA_SI:
      query->begin_result = si_finish_dma_get_cpu_time(sctx);
      break;
   case SI_QUERY_DRAW_CALLS:
      query->begin_result = sctx->num_draw_calls;
      break;
   case SI_QUERY_DECOMPRESS_CALLS:
      query->begin_result = sctx->num_decompress_calls;
      break;
   case SI_QUERY_MRT_DRAW_CALLS:
      query->begin_result = sctx->num_mrt_draw_calls;
      break;
   case SI_QUERY_PRIM_RESTART_CALLS:
      query->begin_result = sctx->num_prim_restart_calls;
      break;
   case SI_QUERY_SPILL_DRAW_CALLS:
      query->begin_result = sctx->num_spill_draw_calls;
      break;
   case SI_QUERY_COMPUTE_CALLS:
      query->begin_result = sctx->num_compute_calls;
      break;
   case SI_QUERY_SPILL_COMPUTE_CALLS:
      query->begin_result = sctx->num_spill_compute_calls;
      break;
   case SI_QUERY_DMA_CALLS:
      query->begin_result = sctx->num_dma_calls;
      break;
   case SI_QUERY_CP_DMA_CALLS:
      query->begin_result = sctx->num_cp_dma_calls;
      break;
   case SI_QUERY_NUM_VS_FLUSHES:
      query->begin_result = sctx->num_vs_flushes;
      break;
   case SI_QUERY_NUM_PS_FLUSHES:
      query->begin_result = sctx->num_ps_flushes;
      break;
   case SI_QUERY_NUM_CS_FLUSHES:
      query->begin_result = sctx->num_cs_flushes;
      break;
   case SI_QUERY_NUM_CB_CACHE_FLUSHES:
      query->begin_result = sctx->num_cb_cache_flushes;
      break;
   case SI_QUERY_NUM_DB_CACHE_FLUSHES:
      query->begin_result = sctx->num_db_cache_flushes;
      break;
   case SI_QUERY_NUM_L2_INVALIDATES:
      query->begin_result = sctx->num_L2_invalidates;
      break;
   case SI_QUERY_NUM_L2_WRITEBACKS:
      query->begin_result = sctx->num_L2_writebacks;
      break;
   case SI_QUERY_NUM_RESIDENT_HANDLES:
      query->begin_result = sctx->num_resident_handles;
      break;
   case SI_QUERY_TC_OFFLOADED_SLOTS:
      query->begin_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
      break;
   case SI_QUERY_TC_DIRECT_SLOTS:
      query->begin_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
      break;
   case SI_QUERY_TC_NUM_SYNCS:
      query->begin_result = sctx->tc ? sctx->tc->num_syncs : 0;
      break;
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_MAPPED_VRAM:
   case SI_QUERY_MAPPED_GTT:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_VRAM_VIS_USAGE:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_GPU_TEMPERATURE:
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
   case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
   case SI_QUERY_NUM_MAPPED_BUFFERS:
      query->begin_result = 0;
      break;
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GFX_IB_SIZE:
   case SI_QUERY_NUM_GFX_IBS:
   case SI_QUERY_NUM_SDMA_IBS:
   case SI_QUERY_NUM_BYTES_MOVED:
   case SI_QUERY_NUM_EVICTIONS:
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
      enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      break;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->begin_time = sctx->ws->query_value(sctx->ws,
                                                RADEON_NUM_GFX_IBS);
      break;
   case SI_QUERY_CS_THREAD_BUSY:
      ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->begin_time = os_time_get_nano();
      break;
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      query->begin_result =
         sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
      query->begin_time = os_time_get_nano();
      break;
   case SI_QUERY_GPU_LOAD:
   case SI_QUERY_GPU_SHADERS_BUSY:
   case SI_QUERY_GPU_TA_BUSY:
   case SI_QUERY_GPU_GDS_BUSY:
   case SI_QUERY_GPU_VGT_BUSY:
   case SI_QUERY_GPU_IA_BUSY:
   case SI_QUERY_GPU_SX_BUSY:
   case SI_QUERY_GPU_WD_BUSY:
   case SI_QUERY_GPU_BCI_BUSY:
   case SI_QUERY_GPU_SC_BUSY:
   case SI_QUERY_GPU_PA_BUSY:
   case SI_QUERY_GPU_DB_BUSY:
   case SI_QUERY_GPU_CP_BUSY:
   case SI_QUERY_GPU_CB_BUSY:
   case SI_QUERY_GPU_SDMA_BUSY:
   case SI_QUERY_GPU_PFP_BUSY:
   case SI_QUERY_GPU_MEQ_BUSY:
   case SI_QUERY_GPU_ME_BUSY:
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
   case SI_QUERY_GPU_CP_DMA_BUSY:
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      query->begin_result = si_begin_counter(sctx->screen,
                                             query->b.type);
      break;
   case SI_QUERY_NUM_COMPILATIONS:
      query->begin_result = p_atomic_read(&sctx->screen->num_compilations);
      break;
   case SI_QUERY_NUM_SHADERS_CREATED:
      query->begin_result = p_atomic_read(&sctx->screen->num_shaders_created);
      break;
   case SI_QUERY_NUM_SHADER_CACHE_HITS:
      query->begin_result =
         p_atomic_read(&sctx->screen->num_shader_cache_hits);
      break;
   case SI_QUERY_GPIN_ASIC_ID:
   case SI_QUERY_GPIN_NUM_SIMD:
   case SI_QUERY_GPIN_NUM_RB:
   case SI_QUERY_GPIN_NUM_SPI:
   case SI_QUERY_GPIN_NUM_SE:
      break;
   default:
      unreachable("si_query_sw_begin: bad query type");
   }

   return true;
}

static bool si_query_sw_end(struct si_context *sctx,
                            struct si_query *rquery)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;
   enum radeon_value_id ws_id;

   switch (query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      break;
   case PIPE_QUERY_GPU_FINISHED:
      sctx->b.flush(&sctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA_SI:
      query->end_result = si_finish_dma_get_cpu_time(sctx);
      break;
   case SI_QUERY_DRAW_CALLS:
      query->end_result = sctx->num_draw_calls;
      break;
   case SI_QUERY_DECOMPRESS_CALLS:
      query->end_result = sctx->num_decompress_calls;
      break;
   case SI_QUERY_MRT_DRAW_CALLS:
      query->end_result = sctx->num_mrt_draw_calls;
      break;
   case SI_QUERY_PRIM_RESTART_CALLS:
      query->end_result = sctx->num_prim_restart_calls;
      break;
   case SI_QUERY_SPILL_DRAW_CALLS:
      query->end_result = sctx->num_spill_draw_calls;
      break;
   case SI_QUERY_COMPUTE_CALLS:
      query->end_result = sctx->num_compute_calls;
      break;
   case SI_QUERY_SPILL_COMPUTE_CALLS:
      query->end_result = sctx->num_spill_compute_calls;
      break;
   case SI_QUERY_DMA_CALLS:
      query->end_result = sctx->num_dma_calls;
      break;
   case SI_QUERY_CP_DMA_CALLS:
      query->end_result = sctx->num_cp_dma_calls;
      break;
   case SI_QUERY_NUM_VS_FLUSHES:
      query->end_result = sctx->num_vs_flushes;
      break;
   case SI_QUERY_NUM_PS_FLUSHES:
      query->end_result = sctx->num_ps_flushes;
      break;
   case SI_QUERY_NUM_CS_FLUSHES:
      query->end_result = sctx->num_cs_flushes;
      break;
   case SI_QUERY_NUM_CB_CACHE_FLUSHES:
      query->end_result = sctx->num_cb_cache_flushes;
      break;
   case SI_QUERY_NUM_DB_CACHE_FLUSHES:
      query->end_result = sctx->num_db_cache_flushes;
      break;
   case SI_QUERY_NUM_L2_INVALIDATES:
      query->end_result = sctx->num_L2_invalidates;
      break;
   case SI_QUERY_NUM_L2_WRITEBACKS:
      query->end_result = sctx->num_L2_writebacks;
      break;
   case SI_QUERY_NUM_RESIDENT_HANDLES:
      query->end_result = sctx->num_resident_handles;
      break;
   case SI_QUERY_TC_OFFLOADED_SLOTS:
      query->end_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
      break;
   case SI_QUERY_TC_DIRECT_SLOTS:
      query->end_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
      break;
   case SI_QUERY_TC_NUM_SYNCS:
      query->end_result = sctx->tc ? sctx->tc->num_syncs : 0;
      break;
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_MAPPED_VRAM:
   case SI_QUERY_MAPPED_GTT:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_VRAM_VIS_USAGE:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_GPU_TEMPERATURE:
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GFX_IB_SIZE:
   case SI_QUERY_NUM_MAPPED_BUFFERS:
   case SI_QUERY_NUM_GFX_IBS:
   case SI_QUERY_NUM_SDMA_IBS:
   case SI_QUERY_NUM_BYTES_MOVED:
   case SI_QUERY_NUM_EVICTIONS:
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
      enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      break;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->end_time = sctx->ws->query_value(sctx->ws,
                                              RADEON_NUM_GFX_IBS);
      break;
   case SI_QUERY_CS_THREAD_BUSY:
      ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
      query->end_time = os_time_get_nano();
      break;
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      query->end_result =
         sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
      query->end_time = os_time_get_nano();
      break;
   case SI_QUERY_GPU_LOAD:
   case SI_QUERY_GPU_SHADERS_BUSY:
   case SI_QUERY_GPU_TA_BUSY:
   case SI_QUERY_GPU_GDS_BUSY:
   case SI_QUERY_GPU_VGT_BUSY:
   case SI_QUERY_GPU_IA_BUSY:
   case SI_QUERY_GPU_SX_BUSY:
   case SI_QUERY_GPU_WD_BUSY:
   case SI_QUERY_GPU_BCI_BUSY:
   case SI_QUERY_GPU_SC_BUSY:
   case SI_QUERY_GPU_PA_BUSY:
   case SI_QUERY_GPU_DB_BUSY:
   case SI_QUERY_GPU_CP_BUSY:
   case SI_QUERY_GPU_CB_BUSY:
   case SI_QUERY_GPU_SDMA_BUSY:
   case SI_QUERY_GPU_PFP_BUSY:
   case SI_QUERY_GPU_MEQ_BUSY:
   case SI_QUERY_GPU_ME_BUSY:
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
   case SI_QUERY_GPU_CP_DMA_BUSY:
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      query->end_result = si_end_counter(sctx->screen,
                                         query->b.type,
                                         query->begin_result);
      query->begin_result = 0;
      break;
   case SI_QUERY_NUM_COMPILATIONS:
      query->end_result = p_atomic_read(&sctx->screen->num_compilations);
      break;
   case SI_QUERY_NUM_SHADERS_CREATED:
      query->end_result = p_atomic_read(&sctx->screen->num_shaders_created);
      break;
   case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
      query->end_result = sctx->last_tex_ps_draw_ratio;
      break;
   case SI_QUERY_NUM_SHADER_CACHE_HITS:
      query->end_result =
         p_atomic_read(&sctx->screen->num_shader_cache_hits);
      break;
   case SI_QUERY_GPIN_ASIC_ID:
   case SI_QUERY_GPIN_NUM_SIMD:
   case SI_QUERY_GPIN_NUM_RB:
   case SI_QUERY_GPIN_NUM_SPI:
   case SI_QUERY_GPIN_NUM_SE:
      break;
   default:
      unreachable("si_query_sw_end: bad query type");
   }

   return true;
}

static bool si_query_sw_get_result(struct si_context *sctx,
                                   struct si_query *rquery,
                                   bool wait,
                                   union pipe_query_result *result)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;

   switch (query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* Convert from cycles per millisecond to cycles per second (Hz). */
      result->timestamp_disjoint.frequency =
         (uint64_t)sctx->screen->info.clock_crystal_freq * 1000;
      result->timestamp_disjoint.disjoint = false;
      return true;
   case PIPE_QUERY_GPU_FINISHED: {
      struct pipe_screen *screen = sctx->b.screen;
      struct pipe_context *ctx = rquery->b.flushed ? NULL : &sctx->b;

      result->b = screen->fence_finish(screen, ctx, query->fence,
                                       wait ? PIPE_TIMEOUT_INFINITE : 0);
      return result->b;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      result->u64 = (query->end_result - query->begin_result) /
                    (query->end_time - query->begin_time);
      return true;
   case SI_QUERY_CS_THREAD_BUSY:
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      result->u64 = (query->end_result - query->begin_result) * 100 /
                    (query->end_time - query->begin_time);
      return true;
   case SI_QUERY_GPIN_ASIC_ID:
      result->u32 = 0;
      return true;
   case SI_QUERY_GPIN_NUM_SIMD:
      result->u32 = sctx->screen->info.num_good_compute_units;
      return true;
   case SI_QUERY_GPIN_NUM_RB:
      result->u32 = sctx->screen->info.num_render_backends;
      return true;
   case SI_QUERY_GPIN_NUM_SPI:
      result->u32 = 1; /* all supported chips have one SPI per SE */
      return true;
   case SI_QUERY_GPIN_NUM_SE:
      result->u32 = sctx->screen->info.max_se;
      return true;
   }

   result->u64 = query->end_result - query->begin_result;

   switch (query->b.type) {
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GPU_TEMPERATURE:
      result->u64 /= 1000;
      break;
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
      result->u64 *= 1000000;
      break;
   }

   return true;
}

static const struct si_query_ops sw_query_ops = {
   .destroy = si_query_sw_destroy,
   .begin = si_query_sw_begin,
   .end = si_query_sw_end,
   .get_result = si_query_sw_get_result,
   .get_result_resource = NULL
};

static struct pipe_query *si_query_sw_create(unsigned query_type)
{
   struct si_query_sw *query;

   query = CALLOC_STRUCT(si_query_sw);
   if (!query)
      return NULL;

   query->b.type = query_type;
   query->b.ops = &sw_query_ops;

   return (struct pipe_query *)query;
}

void si_query_buffer_destroy(struct si_screen *sscreen, struct si_query_buffer *buffer)
{
   struct si_query_buffer *prev = buffer->previous;

   /* Release all query buffers. */
   while (prev) {
      struct si_query_buffer *qbuf = prev;
      prev = prev->previous;
      r600_resource_reference(&qbuf->buf, NULL);
      FREE(qbuf);
   }

   r600_resource_reference(&buffer->buf, NULL);
}

void si_query_buffer_reset(struct si_context *sctx, struct si_query_buffer *buffer)
{
   /* Discard all query buffers except for the oldest. */
   while (buffer->previous) {
      struct si_query_buffer *qbuf = buffer->previous;
      buffer->previous = qbuf->previous;

      r600_resource_reference(&buffer->buf, NULL);
      buffer->buf = qbuf->buf; /* move ownership */
      FREE(qbuf);
   }

   buffer->results_end = 0;

   /* Discard even the oldest buffer if it can't be mapped without a stall. */
   if (buffer->buf &&
       (si_rings_is_buffer_referenced(sctx, buffer->buf->buf, RADEON_USAGE_READWRITE) ||
        !sctx->ws->buffer_wait(buffer->buf->buf, 0, RADEON_USAGE_READWRITE))) {
      r600_resource_reference(&buffer->buf, NULL);
   }
}

bool si_query_buffer_alloc(struct si_context *sctx, struct si_query_buffer *buffer,
                           bool (*prepare_buffer)(struct si_context *, struct si_query_buffer *),
                           unsigned size)
{
   /* Nothing to do if the current buffer still has enough room. */
   if (buffer->buf && buffer->results_end + size <= buffer->buf->b.b.width0)
      return true;

   if (buffer->buf) {
      struct si_query_buffer *qbuf = MALLOC_STRUCT(si_query_buffer);
      memcpy(qbuf, buffer, sizeof(*qbuf));
      buffer->previous = qbuf;
   }

   buffer->results_end = 0;

   /* Queries are normally read by the CPU after
    * being written by the gpu, hence staging is probably a good
    * usage pattern.
    */
   struct si_screen *screen = sctx->screen;
   unsigned buf_size = MAX2(size, screen->info.min_alloc_size);
   buffer->buf = r600_resource(
      pipe_buffer_create(&screen->b, 0, PIPE_USAGE_STAGING, buf_size));
   if (unlikely(!buffer->buf))
      return false;

   if (prepare_buffer) {
      if (unlikely(!prepare_buffer(sctx, buffer))) {
         r600_resource_reference(&buffer->buf, NULL);
         return false;
      }
   }

   return true;
}

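/* Illustrative usage sketch (not driver code; it mirrors the flow of
 * si_query_hw_emit_start below): each snapshot is written at results_end,
 * and a fresh buffer is chained in when the current one fills up:
 *
 *    if (!si_query_buffer_alloc(sctx, &query->buffer,
 *                               query->ops->prepare_buffer,
 *                               query->result_size))
 *       return;
 *    va = query->buffer.buf->gpu_address + query->buffer.results_end;
 *    query->ops->emit_start(sctx, query, query->buffer.buf, va);
 *
 * Readback later walks the 'previous' chain, so no snapshot is lost when
 * the buffer is reallocated mid-query.
 */
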
void si_query_hw_destroy(struct si_screen *sscreen,
                         struct si_query *rquery)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;

   si_query_buffer_destroy(sscreen, &query->buffer);
   r600_resource_reference(&query->workaround_buf, NULL);
   FREE(rquery);
}

static bool si_query_hw_prepare_buffer(struct si_context *sctx,
                                       struct si_query_buffer *qbuf)
{
   static const struct si_query_hw si_query_hw_s;
   struct si_query_hw *query = container_of(qbuf, &si_query_hw_s, buffer);
   struct si_screen *screen = sctx->screen;

   /* The caller ensures that the buffer is currently unused by the GPU. */
   uint32_t *results = screen->ws->buffer_map(qbuf->buf->buf, NULL,
                                              PIPE_TRANSFER_WRITE |
                                              PIPE_TRANSFER_UNSYNCHRONIZED);
   if (!results)
      return false;

   memset(results, 0, qbuf->buf->b.b.width0);

   if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
      unsigned max_rbs = screen->info.num_render_backends;
      unsigned enabled_rb_mask = screen->info.enabled_rb_mask;
      unsigned num_results;
      unsigned i, j;

      /* Set top bits for unused backends. */
      num_results = qbuf->buf->b.b.width0 / query->result_size;
      for (j = 0; j < num_results; j++) {
         for (i = 0; i < max_rbs; i++) {
            if (!(enabled_rb_mask & (1<<i))) {
               results[(i * 4)+1] = 0x80000000;
               results[(i * 4)+3] = 0x80000000;
            }
         }
         results += 4 * max_rbs;
      }
   }

   return true;
}

static void si_query_hw_get_result_resource(struct si_context *sctx,
                                            struct si_query *rquery,
                                            bool wait,
                                            enum pipe_query_value_type result_type,
                                            int index,
                                            struct pipe_resource *resource,
                                            unsigned offset);

static void si_query_hw_do_emit_start(struct si_context *sctx,
                                      struct si_query_hw *query,
                                      struct r600_resource *buffer,
                                      uint64_t va);
static void si_query_hw_do_emit_stop(struct si_context *sctx,
                                     struct si_query_hw *query,
                                     struct r600_resource *buffer,
                                     uint64_t va);
static void si_query_hw_add_result(struct si_screen *sscreen,
                                   struct si_query_hw *, void *buffer,
                                   union pipe_query_result *result);
static void si_query_hw_clear_result(struct si_query_hw *,
                                     union pipe_query_result *);

static struct si_query_hw_ops query_hw_default_hw_ops = {
   .prepare_buffer = si_query_hw_prepare_buffer,
   .emit_start = si_query_hw_do_emit_start,
   .emit_stop = si_query_hw_do_emit_stop,
   .clear_result = si_query_hw_clear_result,
   .add_result = si_query_hw_add_result,
};

static struct pipe_query *si_query_hw_create(struct si_screen *sscreen,
                                             unsigned query_type,
                                             unsigned index)
{
   struct si_query_hw *query = CALLOC_STRUCT(si_query_hw);
   if (!query)
      return NULL;

   query->b.type = query_type;
   query->b.ops = &query_hw_ops;
   query->ops = &query_hw_default_hw_ops;

   switch (query_type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      query->result_size = 16 * sscreen->info.num_render_backends;
      query->result_size += 16; /* for the fence + alignment */
      query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA:
      /* GET_GLOBAL_TIMESTAMP only works if the offset is a multiple of 32. */
      query->result_size = 64;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      query->result_size = 24;
      query->b.num_cs_dw_suspend = 8 + si_cp_write_fence_dwords(sscreen);
      break;
   case PIPE_QUERY_TIMESTAMP:
      query->result_size = 16;
      query->b.num_cs_dw_suspend = 8 + si_cp_write_fence_dwords(sscreen);
      query->flags = SI_QUERY_HW_FLAG_NO_START;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
      query->result_size = 32;
      query->b.num_cs_dw_suspend = 6;
      query->stream = index;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
      query->result_size = 32 * SI_MAX_STREAMS;
      query->b.num_cs_dw_suspend = 6 * SI_MAX_STREAMS;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* 11 values on GCN. */
      query->result_size = 11 * 16;
      query->result_size += 8; /* for the fence + alignment */
      query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
      break;
   default:
      assert(0);
      FREE(query);
      return NULL;
   }

   return (struct pipe_query *)query;
}

static void si_update_occlusion_query_state(struct si_context *sctx,
                                            unsigned type, int diff)
{
   if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
       type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
      bool old_enable = sctx->num_occlusion_queries != 0;
      bool old_perfect_enable =
         sctx->num_perfect_occlusion_queries != 0;
      bool enable, perfect_enable;

      sctx->num_occlusion_queries += diff;
      assert(sctx->num_occlusion_queries >= 0);

      if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
         sctx->num_perfect_occlusion_queries += diff;
         assert(sctx->num_perfect_occlusion_queries >= 0);
      }

      enable = sctx->num_occlusion_queries != 0;
      perfect_enable = sctx->num_perfect_occlusion_queries != 0;

      if (enable != old_enable || perfect_enable != old_perfect_enable) {
         si_set_occlusion_query_state(sctx, old_perfect_enable);
      }
   }
}

static unsigned event_type_for_stream(unsigned stream)
{
   switch (stream) {
   default:
   case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
   case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
   case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
   case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
   }
}

static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
                                  unsigned stream)
{
   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
   radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
   radeon_emit(cs, va);
   radeon_emit(cs, va >> 32);
}

static void si_query_hw_do_emit_start(struct si_context *sctx,
                                      struct si_query_hw *query,
                                      struct r600_resource *buffer,
                                      uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;

   switch (query->b.type) {
   case SI_QUERY_TIME_ELAPSED_SDMA:
      si_dma_emit_timestamp(sctx, buffer, va - buffer->gpu_address);
      return;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      emit_sample_streamout(cs, va, query->stream);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
         emit_sample_streamout(cs, va + 32 * stream, stream);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                        EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                        EOP_DATA_SEL_TIMESTAMP, NULL, va,
                        0, query->b.type);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   default:
      assert(0);
   }
   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
                             RADEON_PRIO_QUERY);
}

static void si_query_hw_emit_start(struct si_context *sctx,
                                   struct si_query_hw *query)
{
   uint64_t va;

   if (!si_query_buffer_alloc(sctx, &query->buffer, query->ops->prepare_buffer,
                              query->result_size))
      return;

   si_update_occlusion_query_state(sctx, query->b.type, 1);
   si_update_prims_generated_query_state(sctx, query->b.type, 1);

   if (query->b.type != SI_QUERY_TIME_ELAPSED_SDMA)
      si_need_gfx_cs_space(sctx);

   va = query->buffer.buf->gpu_address + query->buffer.results_end;
   query->ops->emit_start(sctx, query, query->buffer.buf, va);
}

static void si_query_hw_do_emit_stop(struct si_context *sctx,
                                     struct si_query_hw *query,
                                     struct r600_resource *buffer,
                                     uint64_t va)
{
   struct radeon_cmdbuf *cs = sctx->gfx_cs;
   uint64_t fence_va = 0;

   switch (query->b.type) {
   case SI_QUERY_TIME_ELAPSED_SDMA:
      si_dma_emit_timestamp(sctx, buffer, va + 32 - buffer->gpu_address);
      return;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      va += 8;
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      fence_va = va + sctx->screen->info.num_render_backends * 16 - 8;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      va += 16;
      emit_sample_streamout(cs, va, query->stream);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      va += 16;
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
         emit_sample_streamout(cs, va + 32 * stream, stream);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      va += 8;
      /* fall through */
   case PIPE_QUERY_TIMESTAMP:
      si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                        EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                        EOP_DATA_SEL_TIMESTAMP, NULL, va,
                        0, query->b.type);
      fence_va = va + 8;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS: {
      unsigned sample_size = (query->result_size - 8) / 2;

      va += sample_size;
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      fence_va = va + sample_size;
      break;
   }
   default:
      assert(0);
   }
   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
                             RADEON_PRIO_QUERY);

   if (fence_va) {
      si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                        EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                        EOP_DATA_SEL_VALUE_32BIT,
                        query->buffer.buf, fence_va, 0x80000000,
                        query->b.type);
   }
}

static void si_query_hw_emit_stop(struct si_context *sctx,
                                  struct si_query_hw *query)
{
   uint64_t va;

   /* The queries which need begin already called this in begin_query. */
   if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
      si_need_gfx_cs_space(sctx);
      if (!si_query_buffer_alloc(sctx, &query->buffer, query->ops->prepare_buffer,
                                 query->result_size))
         return;
   }

   if (!query->buffer.buf)
      return; // previous buffer allocation failure

   va = query->buffer.buf->gpu_address + query->buffer.results_end;

   query->ops->emit_stop(sctx, query, query->buffer.buf, va);

   query->buffer.results_end += query->result_size;

   si_update_occlusion_query_state(sctx, query->b.type, -1);
   si_update_prims_generated_query_state(sctx, query->b.type, -1);
}

static void emit_set_predicate(struct si_context *ctx,
                               struct r600_resource *buf, uint64_t va,
                               uint32_t op)
{
   struct radeon_cmdbuf *cs = ctx->gfx_cs;

   if (ctx->chip_class >= GFX9) {
      radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
      radeon_emit(cs, op);
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
   } else {
      radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
      radeon_emit(cs, va);
      radeon_emit(cs, op | ((va >> 32) & 0xFF));
   }
   radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
                             RADEON_PRIO_QUERY);
}

static void si_emit_query_predication(struct si_context *ctx)
{
   struct si_query_hw *query = (struct si_query_hw *)ctx->render_cond;
   struct si_query_buffer *qbuf;
   uint32_t op;
   bool flag_wait, invert;

   if (!query)
      return;

   invert = ctx->render_cond_invert;
   flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
               ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

   if (query->workaround_buf) {
      op = PRED_OP(PREDICATION_OP_BOOL64);
   } else {
      switch (query->b.type) {
      case PIPE_QUERY_OCCLUSION_COUNTER:
      case PIPE_QUERY_OCCLUSION_PREDICATE:
      case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
         op = PRED_OP(PREDICATION_OP_ZPASS);
         break;
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
         op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
         invert = !invert;
         break;
      default:
         assert(false);
         return;
      }
   }

   /* if true then invert, see GL_ARB_conditional_render_inverted */
   if (invert)
      op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
   else
      op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

   /* Use the value written by compute shader as a workaround. Note that
    * the wait flag does not apply in this predication mode.
    *
    * The shader outputs the result value to L2. Workarounds only affect VI
    * and later, where the CP reads data from L2, so we don't need an
    * additional flush.
    */
   if (query->workaround_buf) {
      uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
      emit_set_predicate(ctx, query->workaround_buf, va, op);
      return;
   }

   op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

   /* emit predicate packets for all data blocks */
   for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned results_base = 0;
      uint64_t va_base = qbuf->buf->gpu_address;

      while (results_base < qbuf->results_end) {
         uint64_t va = va_base + results_base;

         if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
            for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
               emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

               /* set CONTINUE bit for all packets except the first */
               op |= PREDICATION_CONTINUE;
            }
         } else {
            emit_set_predicate(ctx, qbuf->buf, va, op);
            op |= PREDICATION_CONTINUE;
         }

         results_base += query->result_size;
      }
   }
}

static struct pipe_query *si_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
   struct si_screen *sscreen =
      (struct si_screen *)ctx->screen;

   if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
       query_type == PIPE_QUERY_GPU_FINISHED ||
       (query_type >= PIPE_QUERY_DRIVER_SPECIFIC &&
        query_type != SI_QUERY_TIME_ELAPSED_SDMA))
      return si_query_sw_create(query_type);

   return si_query_hw_create(sscreen, query_type, index);
}

static void si_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   rquery->ops->destroy(sctx->screen, rquery);
}

static boolean si_begin_query(struct pipe_context *ctx,
                              struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   return rquery->ops->begin(sctx, rquery);
}

bool si_query_hw_begin(struct si_context *sctx,
                       struct si_query *rquery)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;

   if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
      assert(0);
      return false;
   }

   if (!(query->flags & SI_QUERY_HW_FLAG_BEGIN_RESUMES))
      si_query_buffer_reset(sctx, &query->buffer);

   r600_resource_reference(&query->workaround_buf, NULL);

   si_query_hw_emit_start(sctx, query);
   if (!query->buffer.buf)
      return false;

   LIST_ADDTAIL(&query->b.active_list, &sctx->active_queries);
   sctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;
   return true;
}

static bool si_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   return rquery->ops->end(sctx, rquery);
}

bool si_query_hw_end(struct si_context *sctx,
                     struct si_query *rquery)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;

   if (query->flags & SI_QUERY_HW_FLAG_NO_START)
      si_query_buffer_reset(sctx, &query->buffer);

   si_query_hw_emit_stop(sctx, query);

   if (!(query->flags & SI_QUERY_HW_FLAG_NO_START)) {
      LIST_DELINIT(&query->b.active_list);
      sctx->num_cs_dw_queries_suspend -= query->b.num_cs_dw_suspend;
   }

   if (!query->buffer.buf)
      return false;

   return true;
}

static void si_get_hw_query_params(struct si_context *sctx,
                                   struct si_query_hw *rquery, int index,
                                   struct si_hw_query_params *params)
{
   unsigned max_rbs = sctx->screen->info.num_render_backends;

   params->pair_stride = 0;
   params->pair_count = 1;

   switch (rquery->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      params->start_offset = 0;
      params->end_offset = 8;
      params->fence_offset = max_rbs * 16;
      params->pair_stride = 16;
      params->pair_count = max_rbs;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      params->start_offset = 0;
      params->end_offset = 8;
      params->fence_offset = 16;
      break;
   case PIPE_QUERY_TIMESTAMP:
      params->start_offset = 0;
      params->end_offset = 0;
      params->fence_offset = 8;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      params->start_offset = 8;
      params->end_offset = 24;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      params->start_offset = 0;
      params->end_offset = 16;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      params->start_offset = 8 - index * 8;
      params->end_offset = 24 - index * 8;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      params->pair_count = SI_MAX_STREAMS;
      params->pair_stride = 32;
      /* fall through */
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      params->start_offset = 0;
      params->end_offset = 16;

      /* We can re-use the high dword of the last 64-bit value as a
       * fence: it is initialized as 0, and the high bit is set by
       * the write of the streamout stats event.
       */
      params->fence_offset = rquery->result_size - 4;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
   {
      static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
      params->start_offset = offsets[index];
      params->end_offset = 88 + offsets[index];
      params->fence_offset = 2 * 88;
      break;
   }
   default:
      unreachable("si_get_hw_query_params unsupported");
   }
}

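/* For illustration: with PIPE_QUERY_TIME_ELAPSED, the params above describe
 * a 24-byte result slot laid out as
 *
 *    byte  0: begin timestamp (u64)  -> start_offset = 0
 *    byte  8: end timestamp (u64)    -> end_offset = 8
 *    byte 16: fence dword            -> fence_offset = 16
 *
 * The occlusion queries instead hold one {begin, end} pair per render
 * backend; pair_count/pair_stride tell the result shader how to iterate
 * over those pairs.
 */
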
static unsigned si_query_read_result(void *map, unsigned start_index, unsigned end_index,
                                     bool test_status_bit)
{
   uint32_t *current_result = (uint32_t*)map;
   uint64_t start, end;

   start = (uint64_t)current_result[start_index] |
           (uint64_t)current_result[start_index+1] << 32;
   end = (uint64_t)current_result[end_index] |
         (uint64_t)current_result[end_index+1] << 32;

   if (!test_status_bit ||
       ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
      return end - start;
   }
   return 0;
}

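/* Worked example (illustrative): an occlusion counter stores one 16-byte
 * {begin, end} pair per render backend, and the hardware sets bit 63 of
 * each 64-bit value once it has been written. Render backend i is read as
 *
 *    si_query_read_result(buffer + i * 16, 0, 2, true);
 *
 * which yields end - begin when both status bits are set, and 0 (result
 * not available yet) otherwise.
 */
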
static void si_query_hw_add_result(struct si_screen *sscreen,
                                   struct si_query_hw *query,
                                   void *buffer,
                                   union pipe_query_result *result)
{
   unsigned max_rbs = sscreen->info.num_render_backends;

   switch (query->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER: {
      for (unsigned i = 0; i < max_rbs; ++i) {
         unsigned results_base = i * 16;
         result->u64 +=
            si_query_read_result(buffer + results_base, 0, 2, true);
      }
      break;
   }
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
      for (unsigned i = 0; i < max_rbs; ++i) {
         unsigned results_base = i * 16;
         result->b = result->b ||
            si_query_read_result(buffer + results_base, 0, 2, true) != 0;
      }
      break;
   }
   case PIPE_QUERY_TIME_ELAPSED:
      result->u64 += si_query_read_result(buffer, 0, 2, false);
      break;
   case SI_QUERY_TIME_ELAPSED_SDMA:
      result->u64 += si_query_read_result(buffer, 0, 32/4, false);
      break;
   case PIPE_QUERY_TIMESTAMP:
      result->u64 = *(uint64_t*)buffer;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      /* SAMPLE_STREAMOUTSTATS stores this structure:
       * {
       *    u64 NumPrimitivesWritten;
       *    u64 PrimitiveStorageNeeded;
       * }
       * We only need NumPrimitivesWritten here. */
      result->u64 += si_query_read_result(buffer, 2, 6, true);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      /* Here we read PrimitiveStorageNeeded. */
      result->u64 += si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      result->so_statistics.num_primitives_written +=
         si_query_read_result(buffer, 2, 6, true);
      result->so_statistics.primitives_storage_needed +=
         si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      result->b = result->b ||
         si_query_read_result(buffer, 2, 6, true) !=
         si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
         result->b = result->b ||
            si_query_read_result(buffer, 2, 6, true) !=
            si_query_read_result(buffer, 0, 4, true);
         buffer = (char *)buffer + 32;
      }
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      result->pipeline_statistics.ps_invocations +=
         si_query_read_result(buffer, 0, 22, false);
      result->pipeline_statistics.c_primitives +=
         si_query_read_result(buffer, 2, 24, false);
      result->pipeline_statistics.c_invocations +=
         si_query_read_result(buffer, 4, 26, false);
      result->pipeline_statistics.vs_invocations +=
         si_query_read_result(buffer, 6, 28, false);
      result->pipeline_statistics.gs_invocations +=
         si_query_read_result(buffer, 8, 30, false);
      result->pipeline_statistics.gs_primitives +=
         si_query_read_result(buffer, 10, 32, false);
      result->pipeline_statistics.ia_primitives +=
         si_query_read_result(buffer, 12, 34, false);
      result->pipeline_statistics.ia_vertices +=
         si_query_read_result(buffer, 14, 36, false);
      result->pipeline_statistics.hs_invocations +=
         si_query_read_result(buffer, 16, 38, false);
      result->pipeline_statistics.ds_invocations +=
         si_query_read_result(buffer, 18, 40, false);
      result->pipeline_statistics.cs_invocations +=
         si_query_read_result(buffer, 20, 42, false);
#if 0 /* for testing */
      printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
             "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
             "Clipper prims=%llu, PS=%llu, CS=%llu\n",
             result->pipeline_statistics.ia_vertices,
             result->pipeline_statistics.ia_primitives,
             result->pipeline_statistics.vs_invocations,
             result->pipeline_statistics.hs_invocations,
             result->pipeline_statistics.ds_invocations,
             result->pipeline_statistics.gs_invocations,
             result->pipeline_statistics.gs_primitives,
             result->pipeline_statistics.c_invocations,
             result->pipeline_statistics.c_primitives,
             result->pipeline_statistics.ps_invocations,
             result->pipeline_statistics.cs_invocations);
#endif
      break;
   default:
      assert(0);
   }
}

void si_query_hw_suspend(struct si_context *sctx, struct si_query *query)
{
   si_query_hw_emit_stop(sctx, (struct si_query_hw *)query);
}

void si_query_hw_resume(struct si_context *sctx, struct si_query *query)
{
   si_query_hw_emit_start(sctx, (struct si_query_hw *)query);
}

static const struct si_query_ops query_hw_ops = {
   .destroy = si_query_hw_destroy,
   .begin = si_query_hw_begin,
   .end = si_query_hw_end,
   .get_result = si_query_hw_get_result,
   .get_result_resource = si_query_hw_get_result_resource,

   .suspend = si_query_hw_suspend,
   .resume = si_query_hw_resume,
};

static boolean si_get_query_result(struct pipe_context *ctx,
                                   struct pipe_query *query, boolean wait,
                                   union pipe_query_result *result)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   return rquery->ops->get_result(sctx, rquery, wait, result);
}

static void si_get_query_result_resource(struct pipe_context *ctx,
                                         struct pipe_query *query,
                                         boolean wait,
                                         enum pipe_query_value_type result_type,
                                         int index,
                                         struct pipe_resource *resource,
                                         unsigned offset)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   rquery->ops->get_result_resource(sctx, rquery, wait, result_type, index,
                                    resource, offset);
}

static void si_query_hw_clear_result(struct si_query_hw *query,
                                     union pipe_query_result *result)
{
   util_query_clear_result(result, query->b.type);
}

bool si_query_hw_get_result(struct si_context *sctx,
                            struct si_query *rquery,
                            bool wait, union pipe_query_result *result)
{
   struct si_screen *sscreen = sctx->screen;
   struct si_query_hw *query = (struct si_query_hw *)rquery;
   struct si_query_buffer *qbuf;

   query->ops->clear_result(query, result);

   for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned usage = PIPE_TRANSFER_READ |
                       (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
      unsigned results_base = 0;
      void *map;

      if (rquery->b.flushed)
         map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
      else
         map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);

      if (!map)
         return false;

      while (results_base != qbuf->results_end) {
         query->ops->add_result(sscreen, query, map + results_base,
                                result);
         results_base += query->result_size;
      }
   }

   /* Convert the time to expected units. */
   if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
       rquery->type == SI_QUERY_TIME_ELAPSED_SDMA ||
       rquery->type == PIPE_QUERY_TIMESTAMP) {
      result->u64 = (1000000 * result->u64) / sscreen->info.clock_crystal_freq;
   }
   return true;
}

static void si_restore_qbo_state(struct si_context *sctx,
                                 struct si_qbo_state *st)
{
   sctx->b.bind_compute_state(&sctx->b, st->saved_compute);

   sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
   pipe_resource_reference(&st->saved_const0.buffer, NULL);

   sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
   for (unsigned i = 0; i < 3; ++i)
      pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}

static void si_query_hw_get_result_resource(struct si_context *sctx,
                                            struct si_query *rquery,
                                            bool wait,
                                            enum pipe_query_value_type result_type,
                                            int index,
                                            struct pipe_resource *resource,
                                            unsigned offset)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;
   struct si_query_buffer *qbuf;
   struct si_query_buffer *qbuf_prev;
   struct pipe_resource *tmp_buffer = NULL;
   unsigned tmp_buffer_offset = 0;
   struct si_qbo_state saved_state = {};
   struct pipe_grid_info grid = {};
   struct pipe_constant_buffer constant_buffer = {};
   struct pipe_shader_buffer ssbo[3];
   struct si_hw_query_params params;
   struct {
      uint32_t end_offset;
      uint32_t result_stride;
      uint32_t result_count;
      uint32_t config;
      uint32_t fence_offset;
      uint32_t pair_stride;
      uint32_t pair_count;
   } consts;

   if (!sctx->query_result_shader) {
      sctx->query_result_shader = si_create_query_result_cs(sctx);
      if (!sctx->query_result_shader)
         return;
   }

   if (query->buffer.previous) {
      u_suballocator_alloc(sctx->allocator_zeroed_memory, 16, 16,
                           &tmp_buffer_offset, &tmp_buffer);
      if (!tmp_buffer)
         return;
   }

   si_save_qbo_state(sctx, &saved_state);

   si_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
   consts.end_offset = params.end_offset - params.start_offset;
   consts.fence_offset = params.fence_offset - params.start_offset;
   consts.result_stride = query->result_size;
   consts.pair_stride = params.pair_stride;
   consts.pair_count = params.pair_count;

   constant_buffer.buffer_size = sizeof(consts);
   constant_buffer.user_buffer = &consts;

   ssbo[1].buffer = tmp_buffer;
   ssbo[1].buffer_offset = tmp_buffer_offset;
   ssbo[1].buffer_size = 16;

   ssbo[2] = ssbo[1];

   sctx->b.bind_compute_state(&sctx->b, sctx->query_result_shader);

   grid.block[0] = 1;
   grid.block[1] = 1;
   grid.block[2] = 1;
   grid.grid[0] = 1;
   grid.grid[1] = 1;
   grid.grid[2] = 1;

   consts.config = 0;
   if (index < 0)
      consts.config |= 4;
   if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
      consts.config |= 8;
   else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
            query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
      consts.config |= 8 | 256;
   else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
            query->b.type == PIPE_QUERY_TIME_ELAPSED)
      consts.config |= 32;

   switch (result_type) {
   case PIPE_QUERY_TYPE_U64:
   case PIPE_QUERY_TYPE_I64:
      consts.config |= 64;
      break;
   case PIPE_QUERY_TYPE_I32:
      consts.config |= 128;
      break;
   case PIPE_QUERY_TYPE_U32:
      break;
   }

   sctx->flags |= sctx->screen->barrier_flags.cp_to_L2;

   for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
      if (query->b.type != PIPE_QUERY_TIMESTAMP) {
         qbuf_prev = qbuf->previous;
         consts.result_count = qbuf->results_end / query->result_size;
         consts.config &= ~3;
         if (qbuf != &query->buffer)
            consts.config |= 1;
         if (qbuf->previous)
            consts.config |= 2;
      } else {
         /* Only read the last timestamp. */
         qbuf_prev = NULL;
         consts.result_count = 0;
         consts.config |= 16;
         params.start_offset += qbuf->results_end - query->result_size;
      }

      sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

      ssbo[0].buffer = &qbuf->buf->b.b;
      ssbo[0].buffer_offset = params.start_offset;
      ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

      if (!qbuf->previous) {
         ssbo[2].buffer = resource;
         ssbo[2].buffer_offset = offset;
         ssbo[2].buffer_size = 8;

         r600_resource(resource)->TC_L2_dirty = true;
      }

      sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

      if (wait && qbuf == &query->buffer) {
         uint64_t va;

         /* Wait for result availability. Wait only for readiness
          * of the last entry, since the fence writes should be
          * serialized in the CP.
          */
         va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
         va += params.fence_offset;

         si_cp_wait_mem(sctx, sctx->gfx_cs, va, 0x80000000,
                        0x80000000, WAIT_REG_MEM_EQUAL);
      }

      sctx->b.launch_grid(&sctx->b, &grid);
      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   si_restore_qbo_state(sctx, &saved_state);
   pipe_resource_reference(&tmp_buffer, NULL);
}

static void si_render_condition(struct pipe_context *ctx,
                                struct pipe_query *query,
                                boolean condition,
                                enum pipe_render_cond_flag mode)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query_hw *rquery = (struct si_query_hw *)query;
   struct si_atom *atom = &sctx->atoms.s.render_cond;

   if (query) {
      bool needs_workaround = false;

      /* There was a firmware regression in VI which causes successive
       * SET_PREDICATION packets to give the wrong answer for
       * non-inverted stream overflow predication.
       */
      if (((sctx->chip_class == VI && sctx->screen->info.pfp_fw_feature < 49) ||
           (sctx->chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
          !condition &&
          (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
           (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
            (rquery->buffer.previous ||
             rquery->buffer.results_end > rquery->result_size)))) {
         needs_workaround = true;
      }

      if (needs_workaround && !rquery->workaround_buf) {
         bool old_force_off = sctx->render_cond_force_off;
         sctx->render_cond_force_off = true;

         u_suballocator_alloc(
            sctx->allocator_zeroed_memory, 8, 8,
            &rquery->workaround_offset,
            (struct pipe_resource **)&rquery->workaround_buf);

         /* Reset to NULL to avoid a redundant SET_PREDICATION
          * from launching the compute grid.
          */
         sctx->render_cond = NULL;

         ctx->get_query_result_resource(
            ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
            &rquery->workaround_buf->b.b, rquery->workaround_offset);

         /* Setting this in the render cond atom is too late,
          * so set it here. */
         sctx->flags |= sctx->screen->barrier_flags.L2_to_cp |
                        SI_CONTEXT_FLUSH_FOR_RENDER_COND;

         sctx->render_cond_force_off = old_force_off;
      }
   }

   sctx->render_cond = query;
   sctx->render_cond_invert = condition;
   sctx->render_cond_mode = mode;

   si_set_atom_dirty(sctx, atom, query != NULL);
}

void si_suspend_queries(struct si_context *sctx)
{
   struct si_query *query;

   LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, active_list)
      query->ops->suspend(sctx, query);
}

void si_resume_queries(struct si_context *sctx)
{
   struct si_query *query;

   /* Check CS space here. Resuming must not be interrupted by flushes. */
   si_need_gfx_cs_space(sctx);

   LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, active_list)
      query->ops->resume(sctx, query);
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
   { \
      .name = name_, \
      .query_type = SI_QUERY_##query_type_, \
      .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
      .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
      .group_id = group_id_ \
   }

#define X(name_, query_type_, type_, result_type_) \
   XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
   XFULL(name_, query_type_, type_, result_type_, SI_QUERY_GROUP_##group_)

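/* For illustration, X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) expands to
 *
 *    {
 *       .name = "draw-calls",
 *       .query_type = SI_QUERY_DRAW_CALLS,
 *       .type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *       .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 *       .group_id = ~(unsigned)0
 *    }
 */
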
static struct pipe_driver_query_info si_driver_query_list[] = {
   X("num-compilations",          NUM_COMPILATIONS,        UINT64, CUMULATIVE),
   X("num-shaders-created",       NUM_SHADERS_CREATED,     UINT64, CUMULATIVE),
   X("num-shader-cache-hits",     NUM_SHADER_CACHE_HITS,   UINT64, CUMULATIVE),
   X("draw-calls",                DRAW_CALLS,              UINT64, AVERAGE),
   X("decompress-calls",          DECOMPRESS_CALLS,        UINT64, AVERAGE),
   X("MRT-draw-calls",            MRT_DRAW_CALLS,          UINT64, AVERAGE),
   X("prim-restart-calls",        PRIM_RESTART_CALLS,      UINT64, AVERAGE),
   X("spill-draw-calls",          SPILL_DRAW_CALLS,        UINT64, AVERAGE),
   X("compute-calls",             COMPUTE_CALLS,           UINT64, AVERAGE),
   X("spill-compute-calls",       SPILL_COMPUTE_CALLS,     UINT64, AVERAGE),
   X("dma-calls",                 DMA_CALLS,               UINT64, AVERAGE),
   X("cp-dma-calls",              CP_DMA_CALLS,            UINT64, AVERAGE),
   X("num-vs-flushes",            NUM_VS_FLUSHES,          UINT64, AVERAGE),
   X("num-ps-flushes",            NUM_PS_FLUSHES,          UINT64, AVERAGE),
   X("num-cs-flushes",            NUM_CS_FLUSHES,          UINT64, AVERAGE),
   X("num-CB-cache-flushes",      NUM_CB_CACHE_FLUSHES,    UINT64, AVERAGE),
   X("num-DB-cache-flushes",      NUM_DB_CACHE_FLUSHES,    UINT64, AVERAGE),
   X("num-L2-invalidates",        NUM_L2_INVALIDATES,      UINT64, AVERAGE),
   X("num-L2-writebacks",         NUM_L2_WRITEBACKS,       UINT64, AVERAGE),
   X("num-resident-handles",      NUM_RESIDENT_HANDLES,    UINT64, AVERAGE),
   X("tc-offloaded-slots",        TC_OFFLOADED_SLOTS,      UINT64, AVERAGE),
   X("tc-direct-slots",           TC_DIRECT_SLOTS,         UINT64, AVERAGE),
   X("tc-num-syncs",              TC_NUM_SYNCS,            UINT64, AVERAGE),
   X("CS-thread-busy",            CS_THREAD_BUSY,          UINT64, AVERAGE),
   X("gallium-thread-busy",       GALLIUM_THREAD_BUSY,     UINT64, AVERAGE),
   X("requested-VRAM",            REQUESTED_VRAM,          BYTES, AVERAGE),
   X("requested-GTT",             REQUESTED_GTT,           BYTES, AVERAGE),
   X("mapped-VRAM",               MAPPED_VRAM,             BYTES, AVERAGE),
   X("mapped-GTT",                MAPPED_GTT,              BYTES, AVERAGE),
   X("buffer-wait-time",          BUFFER_WAIT_TIME,        MICROSECONDS, CUMULATIVE),
   X("num-mapped-buffers",        NUM_MAPPED_BUFFERS,      UINT64, AVERAGE),
   X("num-GFX-IBs",               NUM_GFX_IBS,             UINT64, AVERAGE),
   X("num-SDMA-IBs",              NUM_SDMA_IBS,            UINT64, AVERAGE),
   X("GFX-BO-list-size",          GFX_BO_LIST_SIZE,        UINT64, AVERAGE),
   X("GFX-IB-size",               GFX_IB_SIZE,             UINT64, AVERAGE),
   X("num-bytes-moved",           NUM_BYTES_MOVED,         BYTES, CUMULATIVE),
   X("num-evictions",             NUM_EVICTIONS,           UINT64, CUMULATIVE),
   X("VRAM-CPU-page-faults",      NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
   X("VRAM-usage",                VRAM_USAGE,              BYTES, AVERAGE),
   X("VRAM-vis-usage",            VRAM_VIS_USAGE,          BYTES, AVERAGE),
   X("GTT-usage",                 GTT_USAGE,               BYTES, AVERAGE),
   X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

   /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
    * which use it as a fallback path to detect the GPU type.
    *
    * Note: The names of these queries are significant for GPUPerfStudio
    * (and possibly their order as well). */
   XG(GPIN, "GPIN_000",           GPIN_ASIC_ID,            UINT, AVERAGE),
   XG(GPIN, "GPIN_001",           GPIN_NUM_SIMD,           UINT, AVERAGE),
   XG(GPIN, "GPIN_002",           GPIN_NUM_RB,             UINT, AVERAGE),
   XG(GPIN, "GPIN_003",           GPIN_NUM_SPI,            UINT, AVERAGE),
   XG(GPIN, "GPIN_004",           GPIN_NUM_SE,             UINT, AVERAGE),

   X("temperature",               GPU_TEMPERATURE,         UINT64, AVERAGE),
   X("shader-clock",              CURRENT_GPU_SCLK,        HZ, AVERAGE),
   X("memory-clock",              CURRENT_GPU_MCLK,        HZ, AVERAGE),

   /* The following queries must be at the end of the list because their
    * availability is adjusted dynamically based on the DRM version. */
   X("GPU-load",                  GPU_LOAD,                UINT64, AVERAGE),
   X("GPU-shaders-busy",          GPU_SHADERS_BUSY,        UINT64, AVERAGE),
   X("GPU-ta-busy",               GPU_TA_BUSY,             UINT64, AVERAGE),
   X("GPU-gds-busy",              GPU_GDS_BUSY,            UINT64, AVERAGE),
   X("GPU-vgt-busy",              GPU_VGT_BUSY,            UINT64, AVERAGE),
   X("GPU-ia-busy",               GPU_IA_BUSY,             UINT64, AVERAGE),
   X("GPU-sx-busy",               GPU_SX_BUSY,             UINT64, AVERAGE),
   X("GPU-wd-busy",               GPU_WD_BUSY,             UINT64, AVERAGE),
   X("GPU-bci-busy",              GPU_BCI_BUSY,            UINT64, AVERAGE),
   X("GPU-sc-busy",               GPU_SC_BUSY,             UINT64, AVERAGE),
   X("GPU-pa-busy",               GPU_PA_BUSY,             UINT64, AVERAGE),
   X("GPU-db-busy",               GPU_DB_BUSY,             UINT64, AVERAGE),
   X("GPU-cp-busy",               GPU_CP_BUSY,             UINT64, AVERAGE),
   X("GPU-cb-busy",               GPU_CB_BUSY,             UINT64, AVERAGE),

   /* SRBM_STATUS2 */
   X("GPU-sdma-busy",             GPU_SDMA_BUSY,           UINT64, AVERAGE),

   /* CP_STAT */
   X("GPU-pfp-busy",              GPU_PFP_BUSY,            UINT64, AVERAGE),
   X("GPU-meq-busy",              GPU_MEQ_BUSY,            UINT64, AVERAGE),
   X("GPU-me-busy",               GPU_ME_BUSY,             UINT64, AVERAGE),
   X("GPU-surf-sync-busy",        GPU_SURF_SYNC_BUSY,      UINT64, AVERAGE),
   X("GPU-cp-dma-busy",           GPU_CP_DMA_BUSY,         UINT64, AVERAGE),
   X("GPU-scratch-ram-busy",      GPU_SCRATCH_RAM_BUSY,    UINT64, AVERAGE),
};

static unsigned si_get_num_queries(struct si_screen *sscreen)
{
   /* amdgpu */
   if (sscreen->info.drm_major == 3) {
      if (sscreen->info.chip_class >= VI)
         return ARRAY_SIZE(si_driver_query_list);
      else
         return ARRAY_SIZE(si_driver_query_list) - 7;
   }

   /* radeon */
   if (sscreen->info.has_read_registers_query) {
      if (sscreen->info.chip_class == CIK)
         return ARRAY_SIZE(si_driver_query_list) - 6;
      else
         return ARRAY_SIZE(si_driver_query_list) - 7;
   }

   return ARRAY_SIZE(si_driver_query_list) - 21;
}

static int si_get_driver_query_info(struct pipe_screen *screen,
                                    unsigned index,
                                    struct pipe_driver_query_info *info)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   unsigned num_queries = si_get_num_queries(sscreen);

   if (!info) {
      unsigned num_perfcounters =
         si_get_perfcounter_info(sscreen, 0, NULL);

      return num_queries + num_perfcounters;
   }

   if (index >= num_queries)
      return si_get_perfcounter_info(sscreen, index - num_queries, info);

   *info = si_driver_query_list[index];

   switch (info->query_type) {
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_MAPPED_VRAM:
      info->max_value.u64 = sscreen->info.vram_size;
      break;
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_MAPPED_GTT:
      info->max_value.u64 = sscreen->info.gart_size;
      break;
   case SI_QUERY_GPU_TEMPERATURE:
      info->max_value.u64 = 125;
      break;
   case SI_QUERY_VRAM_VIS_USAGE:
      info->max_value.u64 = sscreen->info.vram_vis_size;
      break;
   }

   if (info->group_id != ~(unsigned)0 && sscreen->perfcounters)
      info->group_id += sscreen->perfcounters->num_groups;

   return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int si_get_driver_query_group_info(struct pipe_screen *screen,
                                          unsigned index,
                                          struct pipe_driver_query_group_info *info)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   unsigned num_pc_groups = 0;

   if (sscreen->perfcounters)
      num_pc_groups = sscreen->perfcounters->num_groups;

   if (!info)
      return num_pc_groups + SI_NUM_SW_QUERY_GROUPS;

   if (index < num_pc_groups)
      return si_get_perfcounter_group_info(sscreen, index, info);

   index -= num_pc_groups;
   if (index >= SI_NUM_SW_QUERY_GROUPS)
      return 0;

   info->name = "GPIN";
   info->max_active_queries = 5;
   info->num_queries = 5;
   return 1;
}

void si_init_query_functions(struct si_context *sctx)
{
   sctx->b.create_query = si_create_query;
   sctx->b.create_batch_query = si_create_batch_query;
   sctx->b.destroy_query = si_destroy_query;
   sctx->b.begin_query = si_begin_query;
   sctx->b.end_query = si_end_query;
   sctx->b.get_query_result = si_get_query_result;
   sctx->b.get_query_result_resource = si_get_query_result_resource;
   sctx->atoms.s.render_cond.emit = si_emit_query_predication;

   if (((struct si_screen*)sctx->b.screen)->info.num_render_backends > 0)
      sctx->b.render_condition = si_render_condition;

   LIST_INITHEAD(&sctx->active_queries);
}

void si_init_screen_query_functions(struct si_screen *sscreen)
{
   sscreen->b.get_driver_query_info = si_get_driver_query_info;
   sscreen->b.get_driver_query_group_info = si_get_driver_query_group_info;
}