/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "radeonsi/si_pipe.h"
#include "r600_query.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "tgsi/tgsi_text.h"
#include "amd/common/sid.h"

#define SI_MAX_STREAMS 4
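
/* This file implements two query families: software queries, which snapshot
 * driver/winsys counters on the CPU, and hardware queries, which make the GPU
 * write begin/end samples into a buffer that is read back on the CPU (or
 * reduced on the GPU by a compute shader) when the result is requested.
 */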

struct si_hw_query_params {
   unsigned start_offset;
   unsigned end_offset;
   unsigned fence_offset;
   unsigned pair_stride;
   unsigned pair_count;
};
/* Queries without buffer handling or suspend/resume. */
struct si_query_sw {
   struct si_query b;

   uint64_t begin_result;
   uint64_t end_result;

   uint64_t begin_time;
   uint64_t end_time;

   /* Fence for GPU_FINISHED. */
   struct pipe_fence_handle *fence;
};
static void si_query_sw_destroy(struct si_screen *sscreen,
                                struct si_query *rquery)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;

   sscreen->b.fence_reference(&sscreen->b, &query->fence, NULL);
   FREE(query);
}
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
   switch (type) {
   case SI_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
   case SI_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
   case SI_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
   case SI_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
   case SI_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
   case SI_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
   case SI_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
   case SI_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
   case SI_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
   case SI_QUERY_GFX_IB_SIZE: return RADEON_GFX_IB_SIZE_COUNTER;
   case SI_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
   case SI_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
   case SI_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
   case SI_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
   case SI_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
   case SI_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
   case SI_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
   case SI_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
   case SI_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
   default: unreachable("query type does not correspond to winsys id");
   }
}
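
/* Software queries work by snapshotting a counter in begin() and again in
 * end(); get_result() then reports end_result - begin_result (with per-type
 * unit conversion). Rate-style queries additionally record begin/end
 * timestamps so the delta can be normalized to a rate or percentage.
 */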
static bool si_query_sw_begin(struct si_context *sctx,
                              struct si_query *rquery)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;
   enum radeon_value_id ws_id;

   switch(query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
   case PIPE_QUERY_GPU_FINISHED:
      break;
   case SI_QUERY_DRAW_CALLS:
      query->begin_result = sctx->b.num_draw_calls;
      break;
   case SI_QUERY_DECOMPRESS_CALLS:
      query->begin_result = sctx->b.num_decompress_calls;
      break;
   case SI_QUERY_MRT_DRAW_CALLS:
      query->begin_result = sctx->b.num_mrt_draw_calls;
      break;
   case SI_QUERY_PRIM_RESTART_CALLS:
      query->begin_result = sctx->b.num_prim_restart_calls;
      break;
   case SI_QUERY_SPILL_DRAW_CALLS:
      query->begin_result = sctx->b.num_spill_draw_calls;
      break;
   case SI_QUERY_COMPUTE_CALLS:
      query->begin_result = sctx->b.num_compute_calls;
      break;
   case SI_QUERY_SPILL_COMPUTE_CALLS:
      query->begin_result = sctx->b.num_spill_compute_calls;
      break;
   case SI_QUERY_DMA_CALLS:
      query->begin_result = sctx->b.num_dma_calls;
      break;
   case SI_QUERY_CP_DMA_CALLS:
      query->begin_result = sctx->b.num_cp_dma_calls;
      break;
   case SI_QUERY_NUM_VS_FLUSHES:
      query->begin_result = sctx->b.num_vs_flushes;
      break;
   case SI_QUERY_NUM_PS_FLUSHES:
      query->begin_result = sctx->b.num_ps_flushes;
      break;
   case SI_QUERY_NUM_CS_FLUSHES:
      query->begin_result = sctx->b.num_cs_flushes;
      break;
   case SI_QUERY_NUM_CB_CACHE_FLUSHES:
      query->begin_result = sctx->b.num_cb_cache_flushes;
      break;
   case SI_QUERY_NUM_DB_CACHE_FLUSHES:
      query->begin_result = sctx->b.num_db_cache_flushes;
      break;
   case SI_QUERY_NUM_L2_INVALIDATES:
      query->begin_result = sctx->b.num_L2_invalidates;
      break;
   case SI_QUERY_NUM_L2_WRITEBACKS:
      query->begin_result = sctx->b.num_L2_writebacks;
      break;
   case SI_QUERY_NUM_RESIDENT_HANDLES:
      query->begin_result = sctx->b.num_resident_handles;
      break;
   case SI_QUERY_TC_OFFLOADED_SLOTS:
      query->begin_result = sctx->b.tc ? sctx->b.tc->num_offloaded_slots : 0;
      break;
   case SI_QUERY_TC_DIRECT_SLOTS:
      query->begin_result = sctx->b.tc ? sctx->b.tc->num_direct_slots : 0;
      break;
   case SI_QUERY_TC_NUM_SYNCS:
      query->begin_result = sctx->b.tc ? sctx->b.tc->num_syncs : 0;
      break;
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_MAPPED_VRAM:
   case SI_QUERY_MAPPED_GTT:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_VRAM_VIS_USAGE:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_GPU_TEMPERATURE:
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
   case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
   case SI_QUERY_NUM_MAPPED_BUFFERS:
      query->begin_result = 0;
      break;
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GFX_IB_SIZE:
   case SI_QUERY_NUM_GFX_IBS:
   case SI_QUERY_NUM_SDMA_IBS:
   case SI_QUERY_NUM_BYTES_MOVED:
   case SI_QUERY_NUM_EVICTIONS:
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
      enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
      break;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
      query->begin_time = sctx->b.ws->query_value(sctx->b.ws,
                                                  RADEON_NUM_GFX_IBS);
      break;
   case SI_QUERY_CS_THREAD_BUSY:
      ws_id = winsys_id_from_type(query->b.type);
      query->begin_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
      query->begin_time = os_time_get_nano();
      break;
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      query->begin_result =
         sctx->b.tc ? util_queue_get_thread_time_nano(&sctx->b.tc->queue, 0) : 0;
      query->begin_time = os_time_get_nano();
      break;
   case SI_QUERY_GPU_LOAD:
   case SI_QUERY_GPU_SHADERS_BUSY:
   case SI_QUERY_GPU_TA_BUSY:
   case SI_QUERY_GPU_GDS_BUSY:
   case SI_QUERY_GPU_VGT_BUSY:
   case SI_QUERY_GPU_IA_BUSY:
   case SI_QUERY_GPU_SX_BUSY:
   case SI_QUERY_GPU_WD_BUSY:
   case SI_QUERY_GPU_BCI_BUSY:
   case SI_QUERY_GPU_SC_BUSY:
   case SI_QUERY_GPU_PA_BUSY:
   case SI_QUERY_GPU_DB_BUSY:
   case SI_QUERY_GPU_CP_BUSY:
   case SI_QUERY_GPU_CB_BUSY:
   case SI_QUERY_GPU_SDMA_BUSY:
   case SI_QUERY_GPU_PFP_BUSY:
   case SI_QUERY_GPU_MEQ_BUSY:
   case SI_QUERY_GPU_ME_BUSY:
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
   case SI_QUERY_GPU_CP_DMA_BUSY:
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      query->begin_result = si_begin_counter(sctx->screen,
                                             query->b.type);
      break;
   case SI_QUERY_NUM_COMPILATIONS:
      query->begin_result = p_atomic_read(&sctx->screen->num_compilations);
      break;
   case SI_QUERY_NUM_SHADERS_CREATED:
      query->begin_result = p_atomic_read(&sctx->screen->num_shaders_created);
      break;
   case SI_QUERY_NUM_SHADER_CACHE_HITS:
      query->begin_result =
         p_atomic_read(&sctx->screen->num_shader_cache_hits);
      break;
   case SI_QUERY_GPIN_ASIC_ID:
   case SI_QUERY_GPIN_NUM_SIMD:
   case SI_QUERY_GPIN_NUM_RB:
   case SI_QUERY_GPIN_NUM_SPI:
   case SI_QUERY_GPIN_NUM_SE:
      break;
   default:
      unreachable("si_query_sw_begin: bad query type");
   }

   return true;
}
static bool si_query_sw_end(struct si_context *sctx,
                            struct si_query *rquery)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;
   enum radeon_value_id ws_id;

   switch(query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      break;
   case PIPE_QUERY_GPU_FINISHED:
      sctx->b.b.flush(&sctx->b.b, &query->fence, PIPE_FLUSH_DEFERRED);
      break;
   case SI_QUERY_DRAW_CALLS:
      query->end_result = sctx->b.num_draw_calls;
      break;
   case SI_QUERY_DECOMPRESS_CALLS:
      query->end_result = sctx->b.num_decompress_calls;
      break;
   case SI_QUERY_MRT_DRAW_CALLS:
      query->end_result = sctx->b.num_mrt_draw_calls;
      break;
   case SI_QUERY_PRIM_RESTART_CALLS:
      query->end_result = sctx->b.num_prim_restart_calls;
      break;
   case SI_QUERY_SPILL_DRAW_CALLS:
      query->end_result = sctx->b.num_spill_draw_calls;
      break;
   case SI_QUERY_COMPUTE_CALLS:
      query->end_result = sctx->b.num_compute_calls;
      break;
   case SI_QUERY_SPILL_COMPUTE_CALLS:
      query->end_result = sctx->b.num_spill_compute_calls;
      break;
   case SI_QUERY_DMA_CALLS:
      query->end_result = sctx->b.num_dma_calls;
      break;
   case SI_QUERY_CP_DMA_CALLS:
      query->end_result = sctx->b.num_cp_dma_calls;
      break;
   case SI_QUERY_NUM_VS_FLUSHES:
      query->end_result = sctx->b.num_vs_flushes;
      break;
   case SI_QUERY_NUM_PS_FLUSHES:
      query->end_result = sctx->b.num_ps_flushes;
      break;
   case SI_QUERY_NUM_CS_FLUSHES:
      query->end_result = sctx->b.num_cs_flushes;
      break;
   case SI_QUERY_NUM_CB_CACHE_FLUSHES:
      query->end_result = sctx->b.num_cb_cache_flushes;
      break;
   case SI_QUERY_NUM_DB_CACHE_FLUSHES:
      query->end_result = sctx->b.num_db_cache_flushes;
      break;
   case SI_QUERY_NUM_L2_INVALIDATES:
      query->end_result = sctx->b.num_L2_invalidates;
      break;
   case SI_QUERY_NUM_L2_WRITEBACKS:
      query->end_result = sctx->b.num_L2_writebacks;
      break;
   case SI_QUERY_NUM_RESIDENT_HANDLES:
      query->end_result = sctx->b.num_resident_handles;
      break;
   case SI_QUERY_TC_OFFLOADED_SLOTS:
      query->end_result = sctx->b.tc ? sctx->b.tc->num_offloaded_slots : 0;
      break;
   case SI_QUERY_TC_DIRECT_SLOTS:
      query->end_result = sctx->b.tc ? sctx->b.tc->num_direct_slots : 0;
      break;
   case SI_QUERY_TC_NUM_SYNCS:
      query->end_result = sctx->b.tc ? sctx->b.tc->num_syncs : 0;
      break;
   case SI_QUERY_REQUESTED_VRAM:
   case SI_QUERY_REQUESTED_GTT:
   case SI_QUERY_MAPPED_VRAM:
   case SI_QUERY_MAPPED_GTT:
   case SI_QUERY_VRAM_USAGE:
   case SI_QUERY_VRAM_VIS_USAGE:
   case SI_QUERY_GTT_USAGE:
   case SI_QUERY_GPU_TEMPERATURE:
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GFX_IB_SIZE:
   case SI_QUERY_NUM_MAPPED_BUFFERS:
   case SI_QUERY_NUM_GFX_IBS:
   case SI_QUERY_NUM_SDMA_IBS:
   case SI_QUERY_NUM_BYTES_MOVED:
   case SI_QUERY_NUM_EVICTIONS:
   case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
      enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
      break;
   }
   case SI_QUERY_GFX_BO_LIST_SIZE:
      ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
      query->end_time = sctx->b.ws->query_value(sctx->b.ws,
                                                RADEON_NUM_GFX_IBS);
      break;
   case SI_QUERY_CS_THREAD_BUSY:
      ws_id = winsys_id_from_type(query->b.type);
      query->end_result = sctx->b.ws->query_value(sctx->b.ws, ws_id);
      query->end_time = os_time_get_nano();
      break;
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      query->end_result =
         sctx->b.tc ? util_queue_get_thread_time_nano(&sctx->b.tc->queue, 0) : 0;
      query->end_time = os_time_get_nano();
      break;
   case SI_QUERY_GPU_LOAD:
   case SI_QUERY_GPU_SHADERS_BUSY:
   case SI_QUERY_GPU_TA_BUSY:
   case SI_QUERY_GPU_GDS_BUSY:
   case SI_QUERY_GPU_VGT_BUSY:
   case SI_QUERY_GPU_IA_BUSY:
   case SI_QUERY_GPU_SX_BUSY:
   case SI_QUERY_GPU_WD_BUSY:
   case SI_QUERY_GPU_BCI_BUSY:
   case SI_QUERY_GPU_SC_BUSY:
   case SI_QUERY_GPU_PA_BUSY:
   case SI_QUERY_GPU_DB_BUSY:
   case SI_QUERY_GPU_CP_BUSY:
   case SI_QUERY_GPU_CB_BUSY:
   case SI_QUERY_GPU_SDMA_BUSY:
   case SI_QUERY_GPU_PFP_BUSY:
   case SI_QUERY_GPU_MEQ_BUSY:
   case SI_QUERY_GPU_ME_BUSY:
   case SI_QUERY_GPU_SURF_SYNC_BUSY:
   case SI_QUERY_GPU_CP_DMA_BUSY:
   case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
      query->end_result = si_end_counter(sctx->screen,
                                         query->b.type,
                                         query->begin_result);
      query->begin_result = 0;
      break;
   case SI_QUERY_NUM_COMPILATIONS:
      query->end_result = p_atomic_read(&sctx->screen->num_compilations);
      break;
   case SI_QUERY_NUM_SHADERS_CREATED:
      query->end_result = p_atomic_read(&sctx->screen->num_shaders_created);
      break;
   case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
      query->end_result = sctx->b.last_tex_ps_draw_ratio;
      break;
   case SI_QUERY_NUM_SHADER_CACHE_HITS:
      query->end_result =
         p_atomic_read(&sctx->screen->num_shader_cache_hits);
      break;
   case SI_QUERY_GPIN_ASIC_ID:
   case SI_QUERY_GPIN_NUM_SIMD:
   case SI_QUERY_GPIN_NUM_RB:
   case SI_QUERY_GPIN_NUM_SPI:
   case SI_QUERY_GPIN_NUM_SE:
      break;
   default:
      unreachable("si_query_sw_end: bad query type");
   }

   return true;
}
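
/* Most results below are plain end - begin deltas computed at the bottom of
 * the function. The exceptions: thread-busy queries divide the delta by the
 * elapsed wall time to yield a percentage, temperature and wait-time are
 * rescaled to the units advertised in si_driver_query_list, and
 * GPU_FINISHED forwards to fence_finish().
 */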
static bool si_query_sw_get_result(struct si_context *sctx,
                                   struct si_query *rquery,
                                   bool wait,
                                   union pipe_query_result *result)
{
   struct si_query_sw *query = (struct si_query_sw *)rquery;

   switch (query->b.type) {
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* Convert from cycles per millisecond to cycles per second (Hz). */
      result->timestamp_disjoint.frequency =
         (uint64_t)sctx->screen->info.clock_crystal_freq * 1000;
      result->timestamp_disjoint.disjoint = false;
      return true;
   case PIPE_QUERY_GPU_FINISHED: {
      struct pipe_screen *screen = sctx->b.b.screen;
      struct pipe_context *ctx = rquery->b.flushed ? NULL : &sctx->b.b;

      result->b = screen->fence_finish(screen, ctx, query->fence,
                                       wait ? PIPE_TIMEOUT_INFINITE : 0);
      return result->b;
   }

   case SI_QUERY_GFX_BO_LIST_SIZE:
      result->u64 = (query->end_result - query->begin_result) /
                    (query->end_time - query->begin_time);
      return true;
   case SI_QUERY_CS_THREAD_BUSY:
   case SI_QUERY_GALLIUM_THREAD_BUSY:
      result->u64 = (query->end_result - query->begin_result) * 100 /
                    (query->end_time - query->begin_time);
      return true;
   case SI_QUERY_GPIN_ASIC_ID:
      result->u32 = 0;
      return true;
   case SI_QUERY_GPIN_NUM_SIMD:
      result->u32 = sctx->screen->info.num_good_compute_units;
      return true;
   case SI_QUERY_GPIN_NUM_RB:
      result->u32 = sctx->screen->info.num_render_backends;
      return true;
   case SI_QUERY_GPIN_NUM_SPI:
      result->u32 = 1; /* all supported chips have one SPI per SE */
      return true;
   case SI_QUERY_GPIN_NUM_SE:
      result->u32 = sctx->screen->info.max_se;
      return true;
   }

   result->u64 = query->end_result - query->begin_result;

   switch (query->b.type) {
   case SI_QUERY_BUFFER_WAIT_TIME:
   case SI_QUERY_GPU_TEMPERATURE:
      result->u64 /= 1000;
      break;
   case SI_QUERY_CURRENT_GPU_SCLK:
   case SI_QUERY_CURRENT_GPU_MCLK:
      result->u64 *= 1000000;
      break;
   }

   return true;
}
static struct si_query_ops sw_query_ops = {
   .destroy = si_query_sw_destroy,
   .begin = si_query_sw_begin,
   .end = si_query_sw_end,
   .get_result = si_query_sw_get_result,
   .get_result_resource = NULL
};
static struct pipe_query *si_query_sw_create(unsigned query_type)
{
   struct si_query_sw *query;

   query = CALLOC_STRUCT(si_query_sw);
   if (!query)
      return NULL;

   query->b.type = query_type;
   query->b.ops = &sw_query_ops;

   return (struct pipe_query *)query;
}
void si_query_hw_destroy(struct si_screen *sscreen,
                         struct si_query *rquery)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;
   struct si_query_buffer *prev = query->buffer.previous;

   /* Release all query buffers. */
   while (prev) {
      struct si_query_buffer *qbuf = prev;
      prev = prev->previous;
      r600_resource_reference(&qbuf->buf, NULL);
      FREE(qbuf);
   }

   r600_resource_reference(&query->buffer.buf, NULL);
   r600_resource_reference(&query->workaround_buf, NULL);
   FREE(rquery);
}
static struct r600_resource *si_new_query_buffer(struct si_screen *sscreen,
                                                 struct si_query_hw *query)
{
   unsigned buf_size = MAX2(query->result_size,
                            sscreen->info.min_alloc_size);

   /* Queries are normally read by the CPU after
    * being written by the GPU, hence staging is probably a good
    * usage pattern.
    */
   struct r600_resource *buf = (struct r600_resource*)
      pipe_buffer_create(&sscreen->b, 0,
                         PIPE_USAGE_STAGING, buf_size);
   if (!buf)
      return NULL;

   if (!query->ops->prepare_buffer(sscreen, query, buf)) {
      r600_resource_reference(&buf, NULL);
      return NULL;
   }

   return buf;
}
static bool si_query_hw_prepare_buffer(struct si_screen *sscreen,
                                       struct si_query_hw *query,
                                       struct r600_resource *buffer)
{
   /* Callers ensure that the buffer is currently unused by the GPU. */
   uint32_t *results = sscreen->ws->buffer_map(buffer->buf, NULL,
                                               PIPE_TRANSFER_WRITE |
                                               PIPE_TRANSFER_UNSYNCHRONIZED);
   if (!results)
      return false;

   memset(results, 0, buffer->b.b.width0);

   if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
      unsigned max_rbs = sscreen->info.num_render_backends;
      unsigned enabled_rb_mask = sscreen->info.enabled_rb_mask;
      unsigned num_results;
      unsigned i, j;

      /* Set top bits for unused backends. */
      num_results = buffer->b.b.width0 / query->result_size;
      for (j = 0; j < num_results; j++) {
         for (i = 0; i < max_rbs; i++) {
            if (!(enabled_rb_mask & (1<<i))) {
               results[(i * 4)+1] = 0x80000000;
               results[(i * 4)+3] = 0x80000000;
            }
         }
         results += 4 * max_rbs;
      }
   }

   return true;
}
static void si_query_hw_get_result_resource(struct si_context *sctx,
                                            struct si_query *rquery,
                                            bool wait,
                                            enum pipe_query_value_type result_type,
                                            int index,
                                            struct pipe_resource *resource,
                                            unsigned offset);

static struct si_query_ops query_hw_ops = {
   .destroy = si_query_hw_destroy,
   .begin = si_query_hw_begin,
   .end = si_query_hw_end,
   .get_result = si_query_hw_get_result,
   .get_result_resource = si_query_hw_get_result_resource,
};

static void si_query_hw_do_emit_start(struct si_context *sctx,
                                      struct si_query_hw *query,
                                      struct r600_resource *buffer,
                                      uint64_t va);
static void si_query_hw_do_emit_stop(struct si_context *sctx,
                                     struct si_query_hw *query,
                                     struct r600_resource *buffer,
                                     uint64_t va);
static void si_query_hw_add_result(struct si_screen *sscreen,
                                   struct si_query_hw *, void *buffer,
                                   union pipe_query_result *result);
static void si_query_hw_clear_result(struct si_query_hw *,
                                     union pipe_query_result *);

static struct si_query_hw_ops query_hw_default_hw_ops = {
   .prepare_buffer = si_query_hw_prepare_buffer,
   .emit_start = si_query_hw_do_emit_start,
   .emit_stop = si_query_hw_do_emit_stop,
   .clear_result = si_query_hw_clear_result,
   .add_result = si_query_hw_add_result,
};
bool si_query_hw_init(struct si_screen *sscreen,
                      struct si_query_hw *query)
{
   query->buffer.buf = si_new_query_buffer(sscreen, query);
   if (!query->buffer.buf)
      return false;

   return true;
}
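
/* result_size is the per-sample footprint in the query buffer: begin/end
 * pairs of 64-bit values (times the RB or stream count where applicable),
 * plus room for a fence dword where one is appended. num_cs_dw_end is the
 * worst-case command-stream space needed by the end-of-query packets; it is
 * accounted in num_cs_dw_queries_suspend while queries are active.
 */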
static struct pipe_query *si_query_hw_create(struct si_screen *sscreen,
                                             unsigned query_type,
                                             unsigned index)
{
   struct si_query_hw *query = CALLOC_STRUCT(si_query_hw);
   if (!query)
      return NULL;

   query->b.type = query_type;
   query->b.ops = &query_hw_ops;
   query->ops = &query_hw_default_hw_ops;

   switch (query_type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      query->result_size = 16 * sscreen->info.num_render_backends;
      query->result_size += 16; /* for the fence + alignment */
      query->num_cs_dw_end = 6 + si_gfx_write_fence_dwords(sscreen);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      query->result_size = 24;
      query->num_cs_dw_end = 8 + si_gfx_write_fence_dwords(sscreen);
      break;
   case PIPE_QUERY_TIMESTAMP:
      query->result_size = 16;
      query->num_cs_dw_end = 8 + si_gfx_write_fence_dwords(sscreen);
      query->flags = SI_QUERY_HW_FLAG_NO_START;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
      query->result_size = 32;
      query->num_cs_dw_end = 6;
      query->stream = index;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
      query->result_size = 32 * SI_MAX_STREAMS;
      query->num_cs_dw_end = 6 * SI_MAX_STREAMS;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      /* 11 values on GCN. */
      query->result_size = 11 * 16;
      query->result_size += 8; /* for the fence + alignment */
      query->num_cs_dw_end = 6 + si_gfx_write_fence_dwords(sscreen);
      break;
   default:
      assert(0);
      FREE(query);
      return NULL;
   }

   if (!si_query_hw_init(sscreen, query)) {
      FREE(query);
      return NULL;
   }

   return (struct pipe_query *)query;
}
static void si_update_occlusion_query_state(struct si_context *sctx,
                                            unsigned type, int diff)
{
   if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
       type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
      bool old_enable = sctx->b.num_occlusion_queries != 0;
      bool old_perfect_enable =
         sctx->b.num_perfect_occlusion_queries != 0;
      bool enable, perfect_enable;

      sctx->b.num_occlusion_queries += diff;
      assert(sctx->b.num_occlusion_queries >= 0);

      if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
         sctx->b.num_perfect_occlusion_queries += diff;
         assert(sctx->b.num_perfect_occlusion_queries >= 0);
      }

      enable = sctx->b.num_occlusion_queries != 0;
      perfect_enable = sctx->b.num_perfect_occlusion_queries != 0;

      if (enable != old_enable || perfect_enable != old_perfect_enable) {
         si_set_occlusion_query_state(sctx, old_perfect_enable);
      }
   }
}
static unsigned event_type_for_stream(unsigned stream)
{
   switch (stream) {
   default:
   case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
   case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
   case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
   case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
   }
}

static void emit_sample_streamout(struct radeon_winsys_cs *cs, uint64_t va,
                                  unsigned stream)
{
   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
   radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
   radeon_emit(cs, va);
   radeon_emit(cs, va >> 32);
}
static void si_query_hw_do_emit_start(struct si_context *sctx,
                                      struct si_query_hw *query,
                                      struct r600_resource *buffer,
                                      uint64_t va)
{
   struct radeon_winsys_cs *cs = sctx->b.gfx_cs;

   switch (query->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      emit_sample_streamout(cs, va, query->stream);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
         emit_sample_streamout(cs, va + 32 * stream, stream);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      /* Write the timestamp from the CP not waiting for
       * outstanding draws (top-of-pipe).
       */
      radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
      radeon_emit(cs, COPY_DATA_COUNT_SEL |
                      COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
                      COPY_DATA_DST_SEL(COPY_DATA_MEM_ASYNC));
      radeon_emit(cs, 0);
      radeon_emit(cs, 0);
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
      break;
   default:
      assert(0);
   }
   radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
                             RADEON_PRIO_QUERY);
}
static void si_query_hw_emit_start(struct si_context *sctx,
                                   struct si_query_hw *query)
{
   uint64_t va;

   if (!query->buffer.buf)
      return; // previous buffer allocation failure

   si_update_occlusion_query_state(sctx, query->b.type, 1);
   si_update_prims_generated_query_state(sctx, query->b.type, 1);

   si_need_gfx_cs_space(sctx);

   /* Get a new query buffer if needed. */
   if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
      struct si_query_buffer *qbuf = MALLOC_STRUCT(si_query_buffer);
      *qbuf = query->buffer;
      query->buffer.results_end = 0;
      query->buffer.previous = qbuf;
      query->buffer.buf = si_new_query_buffer(sctx->screen, query);
      if (!query->buffer.buf)
         return;
   }

   /* emit begin query */
   va = query->buffer.buf->gpu_address + query->buffer.results_end;

   query->ops->emit_start(sctx, query, query->buffer.buf, va);

   sctx->b.num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
static void si_query_hw_do_emit_stop(struct si_context *sctx,
                                     struct si_query_hw *query,
                                     struct r600_resource *buffer,
                                     uint64_t va)
{
   struct radeon_winsys_cs *cs = sctx->b.gfx_cs;
   uint64_t fence_va = 0;

   switch (query->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      va += 8;
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      fence_va = va + sctx->screen->info.num_render_backends * 16 - 8;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      va += 16;
      emit_sample_streamout(cs, va, query->stream);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      va += 16;
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
         emit_sample_streamout(cs, va + 32 * stream, stream);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      va += 8;
      /* fall through */
   case PIPE_QUERY_TIMESTAMP:
      si_gfx_write_event_eop(sctx, V_028A90_BOTTOM_OF_PIPE_TS,
                             0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
                             0, query->b.type);
      fence_va = va + 8;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS: {
      unsigned sample_size = (query->result_size - 8) / 2;

      va += sample_size;
      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
      radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);

      fence_va = va + sample_size;
      break;
   }
   default:
      assert(0);
   }
   radeon_add_to_buffer_list(sctx, sctx->b.gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
                             RADEON_PRIO_QUERY);

   if (fence_va)
      si_gfx_write_event_eop(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
                             EOP_DATA_SEL_VALUE_32BIT,
                             query->buffer.buf, fence_va, 0x80000000,
                             query->b.type);
}
static void si_query_hw_emit_stop(struct si_context *sctx,
                                  struct si_query_hw *query)
{
   uint64_t va;

   if (!query->buffer.buf)
      return; // previous buffer allocation failure

   /* The queries which need begin already called this in begin_query. */
   if (query->flags & SI_QUERY_HW_FLAG_NO_START)
      si_need_gfx_cs_space(sctx);

   /* emit end query */
   va = query->buffer.buf->gpu_address + query->buffer.results_end;

   query->ops->emit_stop(sctx, query, query->buffer.buf, va);

   query->buffer.results_end += query->result_size;

   if (!(query->flags & SI_QUERY_HW_FLAG_NO_START))
      sctx->b.num_cs_dw_queries_suspend -= query->num_cs_dw_end;

   si_update_occlusion_query_state(sctx, query->b.type, -1);
   si_update_prims_generated_query_state(sctx, query->b.type, -1);
}
static void emit_set_predicate(struct si_context *ctx,
                               struct r600_resource *buf, uint64_t va,
                               uint32_t op)
{
   struct radeon_winsys_cs *cs = ctx->b.gfx_cs;

   if (ctx->b.chip_class >= GFX9) {
      radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
      radeon_emit(cs, op);
      radeon_emit(cs, va);
      radeon_emit(cs, va >> 32);
   } else {
      radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
      radeon_emit(cs, va);
      radeon_emit(cs, op | ((va >> 32) & 0xFF));
   }
   radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, buf, RADEON_USAGE_READ,
                             RADEON_PRIO_QUERY);
}
static void si_emit_query_predication(struct si_context *ctx,
                                      struct r600_atom *atom)
{
   struct si_query_hw *query = (struct si_query_hw *)ctx->b.render_cond;
   struct si_query_buffer *qbuf;
   uint32_t op;
   bool flag_wait, invert;

   if (!query)
      return;

   invert = ctx->b.render_cond_invert;
   flag_wait = ctx->b.render_cond_mode == PIPE_RENDER_COND_WAIT ||
               ctx->b.render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

   if (query->workaround_buf) {
      op = PRED_OP(PREDICATION_OP_BOOL64);
   } else {
      switch (query->b.type) {
      case PIPE_QUERY_OCCLUSION_COUNTER:
      case PIPE_QUERY_OCCLUSION_PREDICATE:
      case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
         op = PRED_OP(PREDICATION_OP_ZPASS);
         break;
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
         op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
         break;
      default:
         assert(false);
         return;
      }
   }

   /* if true then invert, see GL_ARB_conditional_render_inverted */
   if (invert)
      op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
   else
      op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

   /* Use the value written by compute shader as a workaround. Note that
    * the wait flag does not apply in this predication mode.
    *
    * The shader outputs the result value to L2. Workarounds only affect VI
    * and later, where the CP reads data from L2, so we don't need an
    * additional flush.
    */
   if (query->workaround_buf) {
      uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
      emit_set_predicate(ctx, query->workaround_buf, va, op);
      return;
   }

   op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

   /* emit predicate packets for all data blocks */
   for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned results_base = 0;
      uint64_t va_base = qbuf->buf->gpu_address;

      while (results_base < qbuf->results_end) {
         uint64_t va = va_base + results_base;

         if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
            for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
               emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

               /* set CONTINUE bit for all packets except the first */
               op |= PREDICATION_CONTINUE;
            }
         } else {
            emit_set_predicate(ctx, qbuf->buf, va, op);
            op |= PREDICATION_CONTINUE;
         }

         results_base += query->result_size;
      }
   }
}
static struct pipe_query *si_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
   struct si_screen *sscreen =
      (struct si_screen *)ctx->screen;

   if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
       query_type == PIPE_QUERY_GPU_FINISHED ||
       query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
      return si_query_sw_create(query_type);

   return si_query_hw_create(sscreen, query_type, index);
}
static void si_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   rquery->ops->destroy(sctx->screen, rquery);
}
static boolean si_begin_query(struct pipe_context *ctx,
                              struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   return rquery->ops->begin(sctx, rquery);
}
void si_query_hw_reset_buffers(struct si_context *sctx,
                               struct si_query_hw *query)
{
   struct si_query_buffer *prev = query->buffer.previous;

   /* Discard the old query buffers. */
   while (prev) {
      struct si_query_buffer *qbuf = prev;
      prev = prev->previous;
      r600_resource_reference(&qbuf->buf, NULL);
      FREE(qbuf);
   }

   query->buffer.results_end = 0;
   query->buffer.previous = NULL;

   /* Obtain a new buffer if the current one can't be mapped without a stall. */
   if (si_rings_is_buffer_referenced(sctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
       !sctx->b.ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
      r600_resource_reference(&query->buffer.buf, NULL);
      query->buffer.buf = si_new_query_buffer(sctx->screen, query);
   } else {
      if (!query->ops->prepare_buffer(sctx->screen, query, query->buffer.buf))
         r600_resource_reference(&query->buffer.buf, NULL);
   }
}
bool si_query_hw_begin(struct si_context *sctx,
                       struct si_query *rquery)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;

   if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
      assert(0);
      return false;
   }

   if (!(query->flags & SI_QUERY_HW_FLAG_BEGIN_RESUMES))
      si_query_hw_reset_buffers(sctx, query);

   r600_resource_reference(&query->workaround_buf, NULL);

   si_query_hw_emit_start(sctx, query);
   if (!query->buffer.buf)
      return false;

   LIST_ADDTAIL(&query->list, &sctx->b.active_queries);
   return true;
}
static bool si_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   return rquery->ops->end(sctx, rquery);
}
bool si_query_hw_end(struct si_context *sctx,
                     struct si_query *rquery)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;

   if (query->flags & SI_QUERY_HW_FLAG_NO_START)
      si_query_hw_reset_buffers(sctx, query);

   si_query_hw_emit_stop(sctx, query);

   if (!(query->flags & SI_QUERY_HW_FLAG_NO_START))
      LIST_DELINIT(&query->list);

   if (!query->buffer.buf)
      return false;

   return true;
}
static void si_get_hw_query_params(struct si_context *sctx,
                                   struct si_query_hw *rquery, int index,
                                   struct si_hw_query_params *params)
{
   unsigned max_rbs = sctx->screen->info.num_render_backends;

   params->pair_stride = 0;
   params->pair_count = 1;

   switch (rquery->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
      params->start_offset = 0;
      params->end_offset = 8;
      params->fence_offset = max_rbs * 16;
      params->pair_stride = 16;
      params->pair_count = max_rbs;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      params->start_offset = 0;
      params->end_offset = 8;
      params->fence_offset = 16;
      break;
   case PIPE_QUERY_TIMESTAMP:
      params->start_offset = 0;
      params->end_offset = 0;
      params->fence_offset = 8;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      params->start_offset = 8;
      params->end_offset = 24;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      params->start_offset = 0;
      params->end_offset = 16;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      params->start_offset = 8 - index * 8;
      params->end_offset = 24 - index * 8;
      params->fence_offset = params->end_offset + 4;
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      params->pair_count = SI_MAX_STREAMS;
      params->pair_stride = 32;
      /* fall through */
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      params->start_offset = 0;
      params->end_offset = 16;

      /* We can re-use the high dword of the last 64-bit value as a
       * fence: it is initialized as 0, and the high bit is set by
       * the write of the streamout stats event.
       */
      params->fence_offset = rquery->result_size - 4;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
   {
      /* Offsets apply to EG+ */
      static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
      params->start_offset = offsets[index];
      params->end_offset = 88 + offsets[index];
      params->fence_offset = 2 * 88;
      break;
   }
   default:
      unreachable("si_get_hw_query_params unsupported");
   }
}
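
/* Read one begin/end pair of 64-bit values from a mapped query buffer and
 * return the delta. For ZPASS-style results the top bit of each half doubles
 * as a "write landed" flag, so the delta only counts once both halves are
 * valid. For example, for an occlusion sample, (start_index, end_index) =
 * (0, 2) reads dwords {0,1} as the begin counter and {2,3} as the end
 * counter.
 */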
static unsigned si_query_read_result(void *map, unsigned start_index, unsigned end_index,
                                     bool test_status_bit)
{
   uint32_t *current_result = (uint32_t*)map;
   uint64_t start, end;

   start = (uint64_t)current_result[start_index] |
           (uint64_t)current_result[start_index+1] << 32;
   end = (uint64_t)current_result[end_index] |
         (uint64_t)current_result[end_index+1] << 32;

   if (!test_status_bit ||
       ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
      return end - start;
   }
   return 0;
}
static void si_query_hw_add_result(struct si_screen *sscreen,
                                   struct si_query_hw *query,
                                   void *buffer,
                                   union pipe_query_result *result)
{
   unsigned max_rbs = sscreen->info.num_render_backends;

   switch (query->b.type) {
   case PIPE_QUERY_OCCLUSION_COUNTER: {
      for (unsigned i = 0; i < max_rbs; ++i) {
         unsigned results_base = i * 16;
         result->u64 +=
            si_query_read_result(buffer + results_base, 0, 2, true);
      }
      break;
   }
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
      for (unsigned i = 0; i < max_rbs; ++i) {
         unsigned results_base = i * 16;
         result->b = result->b ||
            si_query_read_result(buffer + results_base, 0, 2, true) != 0;
      }
      break;
   }
   case PIPE_QUERY_TIME_ELAPSED:
      result->u64 += si_query_read_result(buffer, 0, 2, false);
      break;
   case PIPE_QUERY_TIMESTAMP:
      result->u64 = *(uint64_t*)buffer;
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      /* SAMPLE_STREAMOUTSTATS stores this structure:
       * {
       *    u64 NumPrimitivesWritten;
       *    u64 PrimitiveStorageNeeded;
       * }
       * We only need NumPrimitivesWritten here. */
      result->u64 += si_query_read_result(buffer, 2, 6, true);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      /* Here we read PrimitiveStorageNeeded. */
      result->u64 += si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      result->so_statistics.num_primitives_written +=
         si_query_read_result(buffer, 2, 6, true);
      result->so_statistics.primitives_storage_needed +=
         si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
      result->b = result->b ||
         si_query_read_result(buffer, 2, 6, true) !=
         si_query_read_result(buffer, 0, 4, true);
      break;
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
         result->b = result->b ||
            si_query_read_result(buffer, 2, 6, true) !=
            si_query_read_result(buffer, 0, 4, true);
         buffer = (char *)buffer + 32;
      }
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      result->pipeline_statistics.ps_invocations +=
         si_query_read_result(buffer, 0, 22, false);
      result->pipeline_statistics.c_primitives +=
         si_query_read_result(buffer, 2, 24, false);
      result->pipeline_statistics.c_invocations +=
         si_query_read_result(buffer, 4, 26, false);
      result->pipeline_statistics.vs_invocations +=
         si_query_read_result(buffer, 6, 28, false);
      result->pipeline_statistics.gs_invocations +=
         si_query_read_result(buffer, 8, 30, false);
      result->pipeline_statistics.gs_primitives +=
         si_query_read_result(buffer, 10, 32, false);
      result->pipeline_statistics.ia_primitives +=
         si_query_read_result(buffer, 12, 34, false);
      result->pipeline_statistics.ia_vertices +=
         si_query_read_result(buffer, 14, 36, false);
      result->pipeline_statistics.hs_invocations +=
         si_query_read_result(buffer, 16, 38, false);
      result->pipeline_statistics.ds_invocations +=
         si_query_read_result(buffer, 18, 40, false);
      result->pipeline_statistics.cs_invocations +=
         si_query_read_result(buffer, 20, 42, false);
#if 0 /* for testing */
      printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
             "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
             "Clipper prims=%llu, PS=%llu, CS=%llu\n",
             result->pipeline_statistics.ia_vertices,
             result->pipeline_statistics.ia_primitives,
             result->pipeline_statistics.vs_invocations,
             result->pipeline_statistics.hs_invocations,
             result->pipeline_statistics.ds_invocations,
             result->pipeline_statistics.gs_invocations,
             result->pipeline_statistics.gs_primitives,
             result->pipeline_statistics.c_invocations,
             result->pipeline_statistics.c_primitives,
             result->pipeline_statistics.ps_invocations,
             result->pipeline_statistics.cs_invocations);
#endif
      break;
   default:
      assert(0);
   }
}
static boolean si_get_query_result(struct pipe_context *ctx,
                                   struct pipe_query *query, boolean wait,
                                   union pipe_query_result *result)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   return rquery->ops->get_result(sctx, rquery, wait, result);
}
static void si_get_query_result_resource(struct pipe_context *ctx,
                                         struct pipe_query *query,
                                         boolean wait,
                                         enum pipe_query_value_type result_type,
                                         int index,
                                         struct pipe_resource *resource,
                                         unsigned offset)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query *rquery = (struct si_query *)query;

   rquery->ops->get_result_resource(sctx, rquery, wait, result_type, index,
                                    resource, offset);
}
static void si_query_hw_clear_result(struct si_query_hw *query,
                                     union pipe_query_result *result)
{
   util_query_clear_result(result, query->b.type);
}
bool si_query_hw_get_result(struct si_context *sctx,
                            struct si_query *rquery,
                            bool wait, union pipe_query_result *result)
{
   struct si_screen *sscreen = sctx->screen;
   struct si_query_hw *query = (struct si_query_hw *)rquery;
   struct si_query_buffer *qbuf;

   query->ops->clear_result(query, result);

   for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
      unsigned usage = PIPE_TRANSFER_READ |
                       (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
      unsigned results_base = 0;
      void *map;

      if (rquery->b.flushed)
         map = sctx->b.ws->buffer_map(qbuf->buf->buf, NULL, usage);
      else
         map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);

      if (!map)
         return false;

      while (results_base != qbuf->results_end) {
         query->ops->add_result(sscreen, query, map + results_base,
                                result);
         results_base += query->result_size;
      }
   }

   /* Convert the time to expected units. */
   if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
       rquery->type == PIPE_QUERY_TIMESTAMP) {
      result->u64 = (1000000 * result->u64) / sscreen->info.clock_crystal_freq;
   }
   return true;
}
/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
static void si_create_query_result_shader(struct si_context *sctx)
{
   /* TEMP[0].xy = accumulated result so far
    * TEMP[0].z = result not available
    *
    * TEMP[1].x = current result index
    * TEMP[1].y = current pair index
    */
   static const char text_tmpl[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL BUFFER[0]\n"
      "DCL BUFFER[1]\n"
      "DCL BUFFER[2]\n"
      "DCL CONST[0][0..1]\n"
      "DCL TEMP[0..5]\n"
      "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
      "IMM[1] UINT32 {1, 2, 4, 8}\n"
      "IMM[2] UINT32 {16, 32, 64, 128}\n"
      "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
      "IMM[4] UINT32 {256, 0, 0, 0}\n"
      "\n"
      "AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
      "UIF TEMP[5]\n"
         /* Check result availability. */
         "LOAD TEMP[1].x, BUFFER[0], CONST[0][1].xxxx\n"
         "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
         "MOV TEMP[1], TEMP[0].zzzz\n"
         "NOT TEMP[0].z, TEMP[0].zzzz\n"
         "\n"
         /* Load result if available. */
         "UIF TEMP[1]\n"
            "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
         "ENDIF\n"
      "ELSE\n"
         /* Load previously accumulated result if requested. */
         "MOV TEMP[0], IMM[0].xxxx\n"
         "AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
         "UIF TEMP[4]\n"
            "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
         "ENDIF\n"
         "\n"
         "MOV TEMP[1].x, IMM[0].xxxx\n"
         "BGNLOOP\n"
            /* Break if accumulated result so far is not available. */
            "UIF TEMP[0].zzzz\n"
               "BRK\n"
            "ENDIF\n"
            "\n"
            /* Break if result_index >= result_count. */
            "USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
            "UIF TEMP[5]\n"
               "BRK\n"
            "ENDIF\n"
            "\n"
            /* Load fence and check result availability */
            "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
            "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
            "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
            "NOT TEMP[0].z, TEMP[0].zzzz\n"
            "UIF TEMP[0].zzzz\n"
               "BRK\n"
            "ENDIF\n"
            "\n"
            "MOV TEMP[1].y, IMM[0].xxxx\n"
            "BGNLOOP\n"
               /* Load start and end. */
               "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
               "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
               "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
               "\n"
               "UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
               "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"
               "\n"
               "U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"
               "\n"
               "AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
               "UIF TEMP[5].zzzz\n"
                  /* Load second start/end half-pair and
                   * take the difference
                   */
                  "UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
                  "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
                  "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"
                  "\n"
                  "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
                  "U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
               "ENDIF\n"
               "\n"
               "U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"
               "\n"
               /* Increment pair index */
               "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
               "USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
               "UIF TEMP[5]\n"
                  "BRK\n"
               "ENDIF\n"
            "ENDLOOP\n"
            "\n"
            /* Increment result index */
            "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
         "ENDLOOP\n"
      "ENDIF\n"
      "\n"
      "AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
      "UIF TEMP[4]\n"
         /* Store accumulated data for chaining. */
         "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
      "ELSE\n"
         "AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
         "UIF TEMP[4]\n"
            /* Store result availability. */
            "NOT TEMP[0].z, TEMP[0]\n"
            "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
            "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"
            "\n"
            "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
            "UIF TEMP[4]\n"
               "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
            "ENDIF\n"
         "ELSE\n"
            /* Store result if it is available. */
            "NOT TEMP[4], TEMP[0].zzzz\n"
            "UIF TEMP[4]\n"
               /* Apply timestamp conversion */
               "AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
               "UIF TEMP[4]\n"
                  "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
                  "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
               "ENDIF\n"
               "\n"
               /* Convert to boolean */
               "AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
               "UIF TEMP[4]\n"
                  "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
                  "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
                  "MOV TEMP[0].y, IMM[0].xxxx\n"
               "ENDIF\n"
               "\n"
               "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
               "UIF TEMP[4]\n"
                  "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
               "ELSE\n"
                  /* Clamp to the 32-bit range. */
                  "UIF TEMP[0].yyyy\n"
                     "MOV TEMP[0].x, IMM[0].wwww\n"
                  "ENDIF\n"
                  "\n"
                  "AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
                  "UIF TEMP[4]\n"
                     "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
                  "ENDIF\n"
                  "\n"
                  "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
               "ENDIF\n"
            "ENDIF\n"
         "ENDIF\n"
      "ENDIF\n"
      "\n"
      "END\n";

   char text[sizeof(text_tmpl) + 32];
   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {};

   /* Hard code the frequency into the shader so that the backend can
    * use the full range of optimizations for divide-by-constant.
    */
   snprintf(text, sizeof(text), text_tmpl,
            sctx->screen->info.clock_crystal_freq);

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   sctx->b.query_result_shader = sctx->b.b.create_compute_state(&sctx->b.b, &state);
}
static void si_restore_qbo_state(struct si_context *sctx,
                                 struct si_qbo_state *st)
{
   sctx->b.b.bind_compute_state(&sctx->b.b, st->saved_compute);

   sctx->b.b.set_constant_buffer(&sctx->b.b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
   pipe_resource_reference(&st->saved_const0.buffer, NULL);

   sctx->b.b.set_shader_buffers(&sctx->b.b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
   for (unsigned i = 0; i < 3; ++i)
      pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
static void si_query_hw_get_result_resource(struct si_context *sctx,
                                            struct si_query *rquery,
                                            bool wait,
                                            enum pipe_query_value_type result_type,
                                            int index,
                                            struct pipe_resource *resource,
                                            unsigned offset)
{
   struct si_query_hw *query = (struct si_query_hw *)rquery;
   struct si_query_buffer *qbuf;
   struct si_query_buffer *qbuf_prev;
   struct pipe_resource *tmp_buffer = NULL;
   unsigned tmp_buffer_offset = 0;
   struct si_qbo_state saved_state = {};
   struct pipe_grid_info grid = {};
   struct pipe_constant_buffer constant_buffer = {};
   struct pipe_shader_buffer ssbo[3];
   struct si_hw_query_params params;
   struct {
      uint32_t end_offset;
      uint32_t result_stride;
      uint32_t result_count;
      uint32_t config;
      uint32_t fence_offset;
      uint32_t pair_stride;
      uint32_t pair_count;
   } consts;

   if (!sctx->b.query_result_shader) {
      si_create_query_result_shader(sctx);
      if (!sctx->b.query_result_shader)
         return;
   }

   if (query->buffer.previous) {
      u_suballocator_alloc(sctx->b.allocator_zeroed_memory, 16, 16,
                           &tmp_buffer_offset, &tmp_buffer);
      if (!tmp_buffer)
         return;
   }

   si_save_qbo_state(sctx, &saved_state);

   si_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
   consts.end_offset = params.end_offset - params.start_offset;
   consts.fence_offset = params.fence_offset - params.start_offset;
   consts.result_stride = query->result_size;
   consts.pair_stride = params.pair_stride;
   consts.pair_count = params.pair_count;

   constant_buffer.buffer_size = sizeof(consts);
   constant_buffer.user_buffer = &consts;

   ssbo[1].buffer = tmp_buffer;
   ssbo[1].buffer_offset = tmp_buffer_offset;
   ssbo[1].buffer_size = 16;

   ssbo[2] = ssbo[1];

   sctx->b.b.bind_compute_state(&sctx->b.b, sctx->b.query_result_shader);

   grid.block[0] = 1;
   grid.block[1] = 1;
   grid.block[2] = 1;
   grid.grid[0] = 1;
   grid.grid[1] = 1;
   grid.grid[2] = 1;

   consts.config = 0;
   if (index < 0)
      consts.config |= 4;
   if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
       query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
      consts.config |= 8;
   else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
            query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
      consts.config |= 8 | 256;
   else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
            query->b.type == PIPE_QUERY_TIME_ELAPSED)
      consts.config |= 32;

   switch (result_type) {
   case PIPE_QUERY_TYPE_U64:
   case PIPE_QUERY_TYPE_I64:
      consts.config |= 64;
      break;
   case PIPE_QUERY_TYPE_I32:
      consts.config |= 128;
      break;
   case PIPE_QUERY_TYPE_U32:
      break;
   }

   sctx->b.flags |= sctx->screen->barrier_flags.cp_to_L2;

   for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
      if (query->b.type != PIPE_QUERY_TIMESTAMP) {
         qbuf_prev = qbuf->previous;
         consts.result_count = qbuf->results_end / query->result_size;
         consts.config &= ~3;
         if (qbuf != &query->buffer)
            consts.config |= 1;
         if (qbuf->previous)
            consts.config |= 2;
      } else {
         /* Only read the last timestamp. */
         qbuf_prev = NULL;
         consts.result_count = 0;
         consts.config |= 16;
         params.start_offset += qbuf->results_end - query->result_size;
      }

      sctx->b.b.set_constant_buffer(&sctx->b.b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

      ssbo[0].buffer = &qbuf->buf->b.b;
      ssbo[0].buffer_offset = params.start_offset;
      ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

      if (!qbuf->previous) {
         ssbo[2].buffer = resource;
         ssbo[2].buffer_offset = offset;
         ssbo[2].buffer_size = 8;

         ((struct r600_resource *)resource)->TC_L2_dirty = true;
      }

      sctx->b.b.set_shader_buffers(&sctx->b.b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

      if (wait && qbuf == &query->buffer) {
         uint64_t va;

         /* Wait for result availability. Wait only for readiness
          * of the last entry, since the fence writes should be
          * serialized in the CP.
          */
         va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
         va += params.fence_offset;

         si_gfx_wait_fence(sctx, va, 0x80000000, 0x80000000);
      }

      sctx->b.b.launch_grid(&sctx->b.b, &grid);
      sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   si_restore_qbo_state(sctx, &saved_state);
   pipe_resource_reference(&tmp_buffer, NULL);
}
static void si_render_condition(struct pipe_context *ctx,
                                struct pipe_query *query,
                                boolean condition,
                                enum pipe_render_cond_flag mode)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_query_hw *rquery = (struct si_query_hw *)query;
   struct r600_atom *atom = &sctx->b.render_cond_atom;

   if (query) {
      bool needs_workaround = false;

      /* There was a firmware regression in VI which causes successive
       * SET_PREDICATION packets to give the wrong answer for
       * non-inverted stream overflow predication.
       */
      if (((sctx->b.chip_class == VI && sctx->screen->info.pfp_fw_feature < 49) ||
           (sctx->b.chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
          !condition &&
          (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
           (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
            (rquery->buffer.previous ||
             rquery->buffer.results_end > rquery->result_size)))) {
         needs_workaround = true;
      }

      if (needs_workaround && !rquery->workaround_buf) {
         bool old_force_off = sctx->b.render_cond_force_off;
         sctx->b.render_cond_force_off = true;

         u_suballocator_alloc(
            sctx->b.allocator_zeroed_memory, 8, 8,
            &rquery->workaround_offset,
            (struct pipe_resource **)&rquery->workaround_buf);

         /* Reset to NULL to avoid a redundant SET_PREDICATION
          * from launching the compute grid.
          */
         sctx->b.render_cond = NULL;

         ctx->get_query_result_resource(
            ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
            &rquery->workaround_buf->b.b, rquery->workaround_offset);

         /* Setting this in the render cond atom is too late,
          * so set it here. */
         sctx->b.flags |= sctx->screen->barrier_flags.L2_to_cp |
                          SI_CONTEXT_FLUSH_FOR_RENDER_COND;

         sctx->b.render_cond_force_off = old_force_off;
      }
   }

   sctx->b.render_cond = query;
   sctx->b.render_cond_invert = condition;
   sctx->b.render_cond_mode = mode;

   si_set_atom_dirty(sctx, atom, query != NULL);
}
void si_suspend_queries(struct si_context *sctx)
{
   struct si_query_hw *query;

   LIST_FOR_EACH_ENTRY(query, &sctx->b.active_queries, list) {
      si_query_hw_emit_stop(sctx, query);
   }
   assert(sctx->b.num_cs_dw_queries_suspend == 0);
}

void si_resume_queries(struct si_context *sctx)
{
   struct si_query_hw *query;

   assert(sctx->b.num_cs_dw_queries_suspend == 0);

   /* Check CS space here. Resuming must not be interrupted by flushes. */
   si_need_gfx_cs_space(sctx);

   LIST_FOR_EACH_ENTRY(query, &sctx->b.active_queries, list) {
      si_query_hw_emit_start(sctx, query);
   }
}
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
   { \
      .name = name_, \
      .query_type = SI_QUERY_##query_type_, \
      .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
      .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
      .group_id = group_id_ \
   }

#define X(name_, query_type_, type_, result_type_) \
   XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
   XFULL(name_, query_type_, type_, result_type_, SI_QUERY_GROUP_##group_)
static struct pipe_driver_query_info si_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
	X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
	X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
	X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
	X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
	X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
	X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
	X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
	X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
	X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
	X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
	X("GFX-IB-size", GFX_IB_SIZE, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
	X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
	X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
	X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
	X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
	X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
	X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
	X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
	X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
	X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
	X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
	X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
	X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
	X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
	X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
	X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
	X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
	X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
	X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
	X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL
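/* Return the number of si_driver_query_list entries to expose. The GPU-load
 * style queries at the tail of the list need kernel interfaces that older
 * DRM versions lack, so the list is trimmed accordingly. */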
static unsigned si_get_num_queries(struct si_screen *sscreen)
{
	if (sscreen->info.drm_major == 2 && sscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(si_driver_query_list);
	else if (sscreen->info.drm_major == 3) {
		if (sscreen->info.chip_class >= VI)
			return ARRAY_SIZE(si_driver_query_list);
		else
			return ARRAY_SIZE(si_driver_query_list) - 7;
	} else
		return ARRAY_SIZE(si_driver_query_list) - 25;
}
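/* pipe_screen::get_driver_query_info: called with info == NULL to obtain the
 * total query count, then once per index to fill in each entry. Software
 * driver queries come first, followed by the hardware performance counters. */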
static int si_get_driver_query_info(struct pipe_screen *screen,
				    unsigned index,
				    struct pipe_driver_query_info *info)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	unsigned num_queries = si_get_num_queries(sscreen);

	if (!info) {
		unsigned num_perfcounters =
			si_get_perfcounter_info(sscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return si_get_perfcounter_info(sscreen, index - num_queries, info);

	*info = si_driver_query_list[index];

	switch (info->query_type) {
	case SI_QUERY_REQUESTED_VRAM:
	case SI_QUERY_VRAM_USAGE:
	case SI_QUERY_MAPPED_VRAM:
		info->max_value.u64 = sscreen->info.vram_size;
		break;
	case SI_QUERY_REQUESTED_GTT:
	case SI_QUERY_GTT_USAGE:
	case SI_QUERY_MAPPED_GTT:
		info->max_value.u64 = sscreen->info.gart_size;
		break;
	case SI_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case SI_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = sscreen->info.vram_vis_size;
		break;
	}

	if (info->group_id != ~(unsigned)0 && sscreen->perfcounters)
		info->group_id += sscreen->perfcounters->num_groups;

	return 1;
}
/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int si_get_driver_query_group_info(struct pipe_screen *screen,
					  unsigned index,
					  struct pipe_driver_query_group_info *info)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	unsigned num_pc_groups = 0;

	if (sscreen->perfcounters)
		num_pc_groups = sscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + SI_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return si_get_perfcounter_group_info(sscreen, index, info);

	index -= num_pc_groups;
	if (index >= SI_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}
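/* Hook the query entry points into the pipe_context vtable. render_condition
 * is only installed when the chip reports render backends. */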
void si_init_query_functions(struct si_context *sctx)
{
	sctx->b.b.create_query = si_create_query;
	sctx->b.b.create_batch_query = si_create_batch_query;
	sctx->b.b.destroy_query = si_destroy_query;
	sctx->b.b.begin_query = si_begin_query;
	sctx->b.b.end_query = si_end_query;
	sctx->b.b.get_query_result = si_get_query_result;
	sctx->b.b.get_query_result_resource = si_get_query_result_resource;
	sctx->b.render_cond_atom.emit = si_emit_query_predication;

	if (((struct si_screen *)sctx->b.b.screen)->info.num_render_backends > 0)
		sctx->b.b.render_condition = si_render_condition;

	LIST_INITHEAD(&sctx->b.active_queries);
}
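/* Screen-level hooks for enumerating driver queries and query groups. */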
void si_init_screen_query_functions(struct si_screen *sscreen)
{
	sscreen->b.get_driver_query_info = si_get_driver_query_info;
	sscreen->b.get_driver_query_group_info = si_get_driver_query_group_info;
}