/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include "r600_query.h"
27 #include "util/u_memory.h"
28 #include "util/u_upload_mgr.h"
29 #include "util/os_time.h"
30 #include "tgsi/tgsi_text.h"
31 #include "amd/common/sid.h"
/* TODO: remove this: */
void si_update_prims_generated_query_state(struct r600_common_context *rctx,
					   unsigned type, int diff);

#define R600_MAX_STREAMS 4
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};
/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;

	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};
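/* A software query samples its counter once in begin() and once in end();
 * get_result() then reports end_result - begin_result. The begin_time/end_time
 * pair exists so that rate-style queries (e.g. CS_THREAD_BUSY) can divide that
 * delta by the elapsed interval.
 */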
static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
				  struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
	FREE(query);
}
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
	case R600_QUERY_GFX_IB_SIZE: return RADEON_GFX_IB_SIZE_COUNTER;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}
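/* This mapping lets the begin/end handlers below treat every winsys-backed
 * counter uniformly. A hypothetical helper (illustrative only; the switch
 * statements below inline this pattern directly) would reduce to:
 *
 *   uint64_t sample_counter(struct r600_common_context *rctx, unsigned type)
 *   {
 *           return rctx->ws->query_value(rctx->ws, winsys_id_from_type(type));
 *   }
 */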
static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->begin_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->begin_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->begin_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->begin_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->begin_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->begin_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->begin_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->begin_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->begin_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GFX_IB_SIZE:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = rctx->ws->query_value(rctx->ws,
							  RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->begin_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->begin_result = si_begin_counter(rctx->screen,
						       query->b.type);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}
static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->end_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->end_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->end_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->end_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->end_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->end_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->end_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->end_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->end_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GFX_IB_SIZE:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = rctx->ws->query_value(rctx->ws,
							RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->end_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->end_result = si_end_counter(rctx->screen,
						   query->b.type,
						   query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = rctx->last_tex_ps_draw_ratio;
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}
static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;

		result->b = screen->fence_finish(screen, ctx, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GFX_BO_LIST_SIZE:
		result->u64 = (query->end_result - query->begin_result) /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_CS_THREAD_BUSY:
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	/* Convert the time to expected units. */
	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}
static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};
static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}
void si_query_hw_destroy(struct r600_common_screen *rscreen,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	r600_resource_reference(&query->workaround_buf, NULL);
	FREE(rquery);
}
static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 rscreen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, 0,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(rscreen, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}
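/* Note: PIPE_USAGE_STAGING is the CPU-read-optimized placement, matching the
 * comment above: results are written once by the GPU and then read back on
 * the CPU in si_query_hw_get_result().
 */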
static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
						    PIPE_TRANSFER_WRITE |
						    PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		unsigned max_rbs = rscreen->info.num_render_backends;
		unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * max_rbs;
		}
	}

	return true;
}
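/* The 0x80000000 writes above pre-set the "sample written" bit (bit 63 of
 * each 64-bit begin/end value) for render backends that are disabled by the
 * RB mask; r600_query_read_result() can then treat those slots as valid
 * zero-delta results instead of waiting on them forever.
 */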
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);

static struct r600_query_ops query_hw_ops = {
	.destroy = si_query_hw_destroy,
	.begin = si_query_hw_begin,
	.end = si_query_hw_end,
	.get_result = si_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};
bool si_query_hw_init(struct r600_common_screen *rscreen,
		      struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rscreen, query);
	if (!query->buffer.buf)
		return false;

	return true;
}
static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		query->result_size = 16 * rscreen->info.num_render_backends;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + si_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + si_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + si_gfx_write_fence_dwords(rscreen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32 * R600_MAX_STREAMS;
		query->num_cs_dw_begin = 6 * R600_MAX_STREAMS;
		query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on GCN. */
		query->result_size = 11 * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + si_gfx_write_fence_dwords(rscreen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!si_query_hw_init(rscreen, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}
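/* result_size above is the GPU-visible footprint of one begin/end snapshot:
 * e.g. occlusion queries store a 16-byte begin/end pair per render backend
 * plus 16 bytes for the fence dword and alignment, while timestamp queries
 * only store an end value plus the fence.
 */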
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, old_enable,
							old_perfect_enable);
		}
	}
}
static unsigned event_type_for_stream(unsigned stream)
{
	switch (stream) {
	default:
	case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
	case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
	case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
	case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
	}
}
static void emit_sample_streamout(struct radeon_winsys_cs *cs, uint64_t va,
				  unsigned stream)
{
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
	radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
}
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		/* Write the timestamp from the CP not waiting for
		 * outstanding draws (top-of-pipe).
		 */
		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_COUNT_SEL |
				COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
				COPY_DATA_DST_SEL(COPY_DATA_MEM_ASYNC));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		assert(0);
	}
	radeon_add_to_buffer_list(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
				  RADEON_PRIO_QUERY);
}
static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	si_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
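/* Query buffers grow as a linked list: when the current buffer is full, the
 * old r600_query_buffer is snapshotted into a MALLOC'd node reachable through
 * query->buffer.previous, and recording continues at offset 0 of a fresh
 * buffer. Readers (get_result, predication) walk the whole chain.
 */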
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += 16;
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		va += 16;
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS,
				       0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
				       0, query->b.type);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	radeon_add_to_buffer_list(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
				  RADEON_PRIO_QUERY);

	if (fence_va)
		si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
				       EOP_DATA_SEL_VALUE_32BIT,
				       query->buffer.buf, fence_va, 0x80000000,
				       query->b.type);
}
static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	si_update_prims_generated_query_state(ctx, query->b.type, -1);
}
static void emit_set_predicate(struct r600_common_context *ctx,
			       struct r600_resource *buf, uint64_t va,
			       uint32_t op)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	if (ctx->chip_class >= GFX9) {
		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
	} else {
		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
		radeon_emit(cs, va);
		radeon_emit(cs, op | ((va >> 32) & 0xFF));
	}
	radeon_add_to_buffer_list(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
				  RADEON_PRIO_QUERY);
}
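/* On GFX9 the SET_PREDICATION payload is the operation dword followed by the
 * split 64-bit VA; older chips emit the low VA bits first and pack the high
 * VA bits into the operation dword, which is why the two paths above emit
 * different layouts.
 */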
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait, invert;

	if (!query)
		return;

	invert = ctx->render_cond_invert;
	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	if (query->workaround_buf) {
		op = PRED_OP(PREDICATION_OP_BOOL64);
	} else {
		switch (query->b.type) {
		case PIPE_QUERY_OCCLUSION_COUNTER:
		case PIPE_QUERY_OCCLUSION_PREDICATE:
		case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
			op = PRED_OP(PREDICATION_OP_ZPASS);
			break;
		case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
			op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
			invert = !invert;
			break;
		default:
			assert(0);
			return;
		}
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

	/* Use the value written by compute shader as a workaround. Note that
	 * the wait flag does not apply in this predication mode.
	 *
	 * The shader outputs the result value to L2. Workarounds only affect VI
	 * and later, where the CP reads data from L2, so we don't need an
	 * additional flush.
	 */
	if (query->workaround_buf) {
		uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
		emit_set_predicate(ctx, query->workaround_buf, va, op);
		return;
	}

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va_base = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			uint64_t va = va_base + results_base;

			if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
				for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
					emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

					/* set CONTINUE bit for all packets except the first */
					op |= PREDICATION_CONTINUE;
				}
			} else {
				emit_set_predicate(ctx, qbuf->buf, va, op);
				op |= PREDICATION_CONTINUE;
			}

			results_base += query->result_size;
		}
	}
}
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_screen *rscreen =
		(struct r600_common_screen *)ctx->screen;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(query_type);

	return r600_query_hw_create(rscreen, query_type, index);
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx->screen, rquery);
}
static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}
void si_query_hw_reset_buffers(struct r600_common_context *rctx,
			       struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (si_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
	} else {
		if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}
bool si_query_hw_begin(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		si_query_hw_reset_buffers(rctx, query);

	r600_resource_reference(&query->workaround_buf, NULL);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}
static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}
bool si_query_hw_end(struct r600_common_context *rctx,
		     struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		si_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}
static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	unsigned max_rbs = rctx->screen->info.num_render_backends;

	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = max_rbs * 16;
		params->pair_stride = 16;
		params->pair_count = max_rbs;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		params->pair_count = R600_MAX_STREAMS;
		params->pair_stride = 32;
		/* fall through */
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 16;

		/* We can re-use the high dword of the last 64-bit value as a
		 * fence: it is initialized as 0, and the high bit is set by
		 * the write of the streamout stats event.
		 */
		params->fence_offset = rquery->result_size - 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}
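/* Example of how these parameters are consumed: for an occlusion query on a
 * chip with N render backends, the result buffer holds N begin/end pairs
 * (pair_stride 16), the accumulation shader sums the per-pair deltas taken at
 * start_offset 0 and end_offset 8, and the fence written after all pairs sits
 * at N * 16.
 */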
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	unsigned max_rbs = rscreen->info.num_render_backends;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
			result->b = result->b ||
				r600_query_read_result(buffer, 2, 6, true) !=
				r600_query_read_result(buffer, 0, 4, true);
			buffer = (char *)buffer + 32;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		result->pipeline_statistics.ps_invocations +=
			r600_query_read_result(buffer, 0, 22, false);
		result->pipeline_statistics.c_primitives +=
			r600_query_read_result(buffer, 2, 24, false);
		result->pipeline_statistics.c_invocations +=
			r600_query_read_result(buffer, 4, 26, false);
		result->pipeline_statistics.vs_invocations +=
			r600_query_read_result(buffer, 6, 28, false);
		result->pipeline_statistics.gs_invocations +=
			r600_query_read_result(buffer, 8, 30, false);
		result->pipeline_statistics.gs_primitives +=
			r600_query_read_result(buffer, 10, 32, false);
		result->pipeline_statistics.ia_primitives +=
			r600_query_read_result(buffer, 12, 34, false);
		result->pipeline_statistics.ia_vertices +=
			r600_query_read_result(buffer, 14, 36, false);
		result->pipeline_statistics.hs_invocations +=
			r600_query_read_result(buffer, 16, 38, false);
		result->pipeline_statistics.ds_invocations +=
			r600_query_read_result(buffer, 18, 40, false);
		result->pipeline_statistics.cs_invocations +=
			r600_query_read_result(buffer, 20, 42, false);
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}
static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}
static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   boolean wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}
static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}
bool si_query_hw_get_result(struct r600_common_context *rctx,
			    struct r600_query *rquery,
			    bool wait, union pipe_query_result *result)
{
	struct r600_common_screen *rscreen = rctx->screen;
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned usage = PIPE_TRANSFER_READ |
				 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
		unsigned results_base = 0;
		void *map;

		if (rquery->b.flushed)
			map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
		else
			map = si_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);

		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rscreen, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
	}
	return true;
}
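/* Results accumulate across the whole buffer chain: add_result() is called
 * for every snapshot in every chained buffer, so a query that outlived
 * several buffers still reports one combined value. The final time conversion
 * is done only once, after the loop.
 */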
/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0][0..1]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
		"IMM[4] UINT32 {256, 0, 0, 0}\n"

		"AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"LOAD TEMP[1].x, BUFFER[0], CONST[0][1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

					"UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

					"U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"

					"AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
					"UIF TEMP[5].zzzz\n"
						/* Load second start/end half-pair and
						 * take the difference
						 */
						"UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
						"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
						"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

						"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
						"U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
					"ENDIF\n"

					"U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"

					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"

				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

				"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"

					/* Convert to boolean */
					"AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"

					"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"

						"AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"

						"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}
static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;
	struct r600_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct r600_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct r600_hw_query_params params;
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
	} consts;

	if (!rctx->query_result_shader) {
		r600_create_query_result_shader(rctx);
		if (!rctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}

	rctx->save_qbo_state(&rctx->b, &saved_state);

	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
		 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
		consts.config |= 8 | 256;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset;
		ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset;
			ssbo[2].buffer_size = 8;

			((struct r600_resource *)resource)->TC_L2_dirty = true;
		}

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			si_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  enum pipe_render_cond_flag mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_atom *atom = &rctx->render_cond_atom;

	if (query) {
		bool needs_workaround = false;

		/* There was a firmware regression in VI which causes successive
		 * SET_PREDICATION packets to give the wrong answer for
		 * non-inverted stream overflow predication.
		 */
		if (((rctx->chip_class == VI && rctx->screen->info.pfp_fw_feature < 49) ||
		     (rctx->chip_class == GFX9 && rctx->screen->info.pfp_fw_feature < 38)) &&
		    !condition &&
		    (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
		     (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
		      (rquery->buffer.previous ||
		       rquery->buffer.results_end > rquery->result_size)))) {
			needs_workaround = true;
		}

		if (needs_workaround && !rquery->workaround_buf) {
			bool old_force_off = rctx->render_cond_force_off;
			rctx->render_cond_force_off = true;

			u_suballocator_alloc(
				rctx->allocator_zeroed_memory, 8, 8,
				&rquery->workaround_offset,
				(struct pipe_resource **)&rquery->workaround_buf);

			/* Reset to NULL to avoid a redundant SET_PREDICATION
			 * from launching the compute grid.
			 */
			rctx->render_cond = NULL;

			ctx->get_query_result_resource(
				ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
				&rquery->workaround_buf->b.b, rquery->workaround_offset);

			/* Settings this in the render cond atom is too late,
			 * so set it here. */
			rctx->flags |= rctx->screen->barrier_flags.L2_to_cp |
				       R600_CONTEXT_FLUSH_FOR_RENDER_COND;

			rctx->render_cond_force_off = old_force_off;
		}
	}

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}
void si_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}
void si_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}
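/*
 * For illustration: the intended pairing of the two helpers above.
 * Callers that must interrupt the gfx CS bracket the interruption so
 * that hardware queries keep accumulating correct values. A minimal
 * sketch, assuming a flush helper of this shape exists at the call
 * site (the flush call here is hypothetical):
 *
 *	si_suspend_queries(rctx);        // emits "stop" for each active query
 *	rctx->gfx.flush(rctx, 0, NULL);  // hypothetical flush
 *	si_resume_queries(rctx);         // reserves CS space, emits "start"
 *
 * Reserving the CS space up front matters: if a flush could sneak in
 * between two r600_query_hw_emit_start() calls, some queries would be
 * resumed in one IB and others in the next, corrupting the counts.
 */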
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
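/*
 * For illustration: what one X() entry below expands to. X() marks a
 * query as ungrouped by passing ~0 as the group id; XG() substitutes a
 * real R600_QUERY_GROUP_* id instead.
 *
 *	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE)
 *
 * becomes:
 *
 *	{
 *		.name = "draw-calls",
 *		.query_type = R600_QUERY_DRAW_CALLS,
 *		.type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 *		.group_id = ~(unsigned)0
 *	}
 */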
static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
	X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
	X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
	X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
	X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
	X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
	X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
	X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
	X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
	X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
	X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
	X("GFX-IB-size", GFX_IB_SIZE, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use it as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
	X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
	X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
	X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
	X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
	X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
	X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
	X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
	X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
	X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
	X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
	X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
	X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
	X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
	X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
	X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
	X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
	X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
	X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
	X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
};
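/*
 * The names above are user-visible: the Gallium HUD discovers them via
 * pipe_screen::get_driver_query_info, so for example
 *
 *	GALLIUM_HUD=GPU-load,draw-calls,requested-VRAM glxgears
 *
 * graphs three of these queries on screen. That is standard Gallium
 * behavior, noted here for context rather than defined by this file.
 */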
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3) {
		if (rscreen->chip_class >= VI)
			return ARRAY_SIZE(r600_driver_query_list);
		else
			return ARRAY_SIZE(r600_driver_query_list) - 7;
	} else
		return ARRAY_SIZE(r600_driver_query_list) - 25;
}
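/*
 * For illustration: because trimming works by shortening the reported
 * count, the dynamically available queries must stay contiguous at the
 * tail of r600_driver_query_list (see the comment in the list above).
 * A sketch of the invariant, with an illustrative constant that is not
 * defined in this file:
 *
 *	#define NUM_ALWAYS_AVAILABLE \
 *		(ARRAY_SIZE(r600_driver_query_list) - 25)
 *	// Entries [0, NUM_ALWAYS_AVAILABLE) work on any supported DRM;
 *	// entries past that index need a newer DRM and/or chip. Inserting
 *	// a new query before the tail keeps the math valid, while
 *	// appending one requires updating the -7 and -25 offsets here.
 */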
static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			si_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return si_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case R600_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = rscreen->info.vram_vis_size;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}
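/*
 * For illustration: the standard Gallium enumeration protocol this
 * implements. Callers pass info == NULL once to get the total count,
 * then iterate by index:
 *
 *	int n = screen->get_driver_query_info(screen, 0, NULL);
 *	for (int i = 0; i < n; i++) {
 *		struct pipe_driver_query_info qi;
 *		if (screen->get_driver_query_info(screen, i, &qi))
 *			printf("%s\n", qi.name);
 *	}
 *
 * Indices below num_queries map straight into r600_driver_query_list;
 * higher indices fall through to the hardware performance counters.
 */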
/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return si_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}
void si_init_query_functions(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = si_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen *)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}
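/*
 * For illustration: once the hooks above are installed, a state tracker
 * exercises them through the generic pipe_context interface. A minimal
 * sketch (standard Gallium usage, not code from this file):
 *
 *	struct pipe_query *q =
 *		pipe->create_query(pipe, PIPE_QUERY_TIME_ELAPSED, 0);
 *	pipe->begin_query(pipe, q);   // dispatches to r600_begin_query
 *	// ... rendering ...
 *	pipe->end_query(pipe, q);     // dispatches to r600_end_query
 *
 *	union pipe_query_result result;
 *	pipe->get_query_result(pipe, q, true, &result);  // true = wait
 *	pipe->destroy_query(pipe, q);
 *
 * render_condition is only exposed when the chip reports render
 * backends, presumably because occlusion-based predication needs them.
 */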
void si_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}