/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include "r600_query.h"
26 #include "r600_pipe.h"
28 #include "util/u_memory.h"
29 #include "util/u_upload_mgr.h"
30 #include "util/os_time.h"
31 #include "tgsi/tgsi_text.h"
33 #define R600_MAX_STREAMS 4
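
/* Two query implementations live in this file: "software" queries
 * (r600_query_sw) snapshot driver/winsys counters on begin/end and never
 * touch the GPU, while "hardware" queries (r600_query_hw) allocate a GPU
 * buffer and emit command-stream packets that make the GPU write results
 * into it.
 */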
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};
/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;

	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};
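
/* A software query result is always computed as (end - begin): begin_result
 * is sampled in begin_query, end_result in end_query, and get_result reports
 * the difference. Rate-style queries additionally record begin_time/end_time
 * so the delta can be normalized to a percentage.
 */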
static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
				  struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
	FREE(query);
}
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}
static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->begin_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->begin_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->begin_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->begin_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->begin_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->begin_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->begin_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = rctx->ws->query_value(rctx->ws,
							  RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->begin_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->begin_result = r600_begin_counter(rctx->screen,
							 query->b.type);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}
static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->end_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->end_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->end_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->end_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->end_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->end_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->end_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = rctx->ws->query_value(rctx->ws,
							RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->end_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->end_result = r600_end_counter(rctx->screen,
						     query->b.type,
						     query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}
static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;

		result->b = screen->fence_finish(screen, ctx, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GFX_BO_LIST_SIZE:
		result->u64 = (query->end_result - query->begin_result) /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_CS_THREAD_BUSY:
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}
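
/* Example of the rate computation above: for R600_QUERY_CS_THREAD_BUSY,
 * begin/end_result snapshot the winsys' RADEON_CS_THREAD_TIME counter and
 * begin/end_time snapshot os_time_get_nano(), so
 * (end_result - begin_result) * 100 / (end_time - begin_time)
 * is the CS thread's busy percentage over the measured interval (assuming
 * the counter is reported in the same nanosecond units).
 */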
static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}
void r600_query_hw_destroy(struct r600_common_screen *rscreen,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}
static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 rscreen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource *)
		pipe_buffer_create(&rscreen->b, 0,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(rscreen, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}
static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
						    PIPE_TRANSFER_WRITE |
						    PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		unsigned max_rbs = rscreen->info.num_render_backends;
		unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * max_rbs;
		}
	}

	return true;
}
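
/* Note on the layout prepared above: each render backend writes a 16-byte
 * {begin, end} pair of 64-bit counters, and the hardware sets the top bit of
 * each 64-bit value once it has been written. Pre-setting that bit for
 * backends masked out of enabled_rb_mask makes the readback code treat them
 * as "already written", so it never waits on results that will never arrive.
 */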
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};
bool r600_query_hw_init(struct r600_common_screen *rscreen,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rscreen, query);
	if (!query->buffer.buf)
		return false;

	return true;
}
static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		query->result_size = 16 * rscreen->info.num_render_backends;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32 * R600_MAX_STREAMS;
		query->num_cs_dw_begin = 6 * R600_MAX_STREAMS;
		query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rscreen, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}
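
/* Worked example for the sizes chosen above: with 4 render backends, an
 * occlusion query needs 4 * 16 = 64 bytes of ZPASS begin/end pairs plus
 * 16 bytes for the fence dword and alignment, i.e. result_size = 80 per
 * begin/end snapshot stored in the query buffer.
 */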
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			struct r600_context *ctx = (struct r600_context *)rctx;
			r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
		}
	}
}
static unsigned event_type_for_stream(unsigned stream)
{
	switch (stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}
static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
				  unsigned stream)
{
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
	radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
}
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_cmdbuf *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		/* Write the timestamp after the last draw is done. */
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, EOP_DATA_SEL_TIMESTAMP,
					 NULL, va, 0, query->b.type);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}
static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_cmdbuf *cs = ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += 16;
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		va += 16;
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
					 0, query->b.type);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (fence_va)
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
					 EOP_DATA_SEL_VALUE_32BIT,
					 query->buffer.buf, fence_va, 0x80000000,
					 query->b.type);
}
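
/* The EOP event written above stores 0x80000000 at fence_va once all prior
 * work has drained, which matches the top-bit "result ready" convention
 * tested by r600_query_read_result() and by the result-readback shader.
 */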
static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}
static void emit_set_predicate(struct r600_common_context *ctx,
			       struct r600_resource *buf, uint64_t va,
			       uint32_t op)
{
	struct radeon_cmdbuf *cs = ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
	radeon_emit(cs, va);
	radeon_emit(cs, op | ((va >> 32) & 0xFF));
	r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
			RADEON_PRIO_QUERY);
}
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait, invert;

	if (!query)
		return;

	invert = ctx->render_cond_invert;
	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va_base = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			uint64_t va = va_base + results_base;

			if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
				for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
					emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

					/* set CONTINUE bit for all packets except the first */
					op |= PREDICATION_CONTINUE;
				}
			} else {
				emit_set_predicate(ctx, qbuf->buf, va, op);
				op |= PREDICATION_CONTINUE;
			}

			results_base += query->result_size;
		}
	}
}
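
/* Every results block after the first is emitted with PREDICATION_CONTINUE
 * set, so the hardware accumulates the predicate across all SET_PREDICATION
 * packets instead of letting the last packet win.
 */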
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_screen *rscreen =
		(struct r600_common_screen *)ctx->screen;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(query_type);

	return r600_query_hw_create(rscreen, query_type, index);
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx->screen, rquery);
}
static bool r600_begin_query(struct pipe_context *ctx,
			     struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}
void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
	} else {
		if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}
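
/* Note: reusing the existing buffer requires clearing it via
 * prepare_buffer(), which maps it unsynchronized; that is only safe because
 * the is_buffer_referenced/buffer_wait checks above guarantee the GPU is
 * done with it. Otherwise a fresh buffer is allocated instead of stalling.
 */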
bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	list_addtail(&query->list, &rctx->active_queries);
	return true;
}
static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}
bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		list_delinit(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}
static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	unsigned max_rbs = rctx->screen->info.num_render_backends;

	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = max_rbs * 16;
		params->pair_stride = 16;
		params->pair_count = max_rbs;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		params->pair_count = R600_MAX_STREAMS;
		params->pair_stride = 32;
		/* fall through */
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 16;

		/* We can re-use the high dword of the last 64-bit value as a
		 * fence: it is initialized as 0, and the high bit is set by
		 * the write of the streamout stats event.
		 */
		params->fence_offset = rquery->result_size - 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
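
/* The status-bit test above implements the "top bit means written"
 * convention: a result pair only contributes when bit 63 of both the start
 * and the end value has been set by the GPU; otherwise 0 is returned and
 * the pair is ignored (e.g. for disabled render backends).
 */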
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	unsigned max_rbs = rscreen->info.num_render_backends;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
			result->b = result->b ||
				r600_query_read_result(buffer, 2, 6, true) !=
				r600_query_read_result(buffer, 0, 4, true);
			buffer = (char *)buffer + 32;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (rscreen->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}
static bool r600_get_query_result(struct pipe_context *ctx,
				  struct pipe_query *query, bool wait,
				  union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}
static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   bool wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}
static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}
bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_common_screen *rscreen = rctx->screen;
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned usage = PIPE_TRANSFER_READ |
				 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
		unsigned results_base = 0;
		void *map;

		if (rquery->b.flushed)
			map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
		else
			map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);

		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rscreen, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
	}
	return true;
}
/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *  1.w = result_offset
 *  2.x = buffer0 offset
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
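
/* Example config values derived from the bit field above: an occlusion
 * predicate read back as a 64-bit boolean uses 8 | 64 (booleanize + store
 * 64 bits), while an SO_OVERFLOW predicate uses 8 | 256 so the shader also
 * takes the difference of the two successive half-pairs.
 */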
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0][0..2]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
		"IMM[4] UINT32 {256, 0, 0, 0}\n"
		""
		"AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"UADD TEMP[1].x, CONST[0][1].xxxx, CONST[0][2].xxxx\n"
			"LOAD TEMP[1].x, BUFFER[0], TEMP[1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"
			""
			/* Load result if available. */
			"UIF TEMP[1]\n"
				"UADD TEMP[0].x, IMM[0].xxxx, CONST[0][2].xxxx\n"
				"LOAD TEMP[0].xy, BUFFER[0], TEMP[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"
			""
			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"
				""
				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"
				""
				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
				"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"
				""
				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
					"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
					""
					"UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"
					""
					"U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"
					""
					"AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
					"UIF TEMP[5].zzzz\n"
						/* Load second start/end half-pair and
						 * take the difference
						 */
						"UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
						"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
						"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"
						""
						"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
						"U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
					"ENDIF\n"
					""
					"U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"
					""
					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"
				""
				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"
		""
		"AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, CONST[0][1].wwww, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].zzzz\n"
				""
				"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, CONST[0][1].wwww, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"
					""
					/* Convert to boolean */
					"AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"
					""
					"AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, CONST[0][1].wwww, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"
						""
						"AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"
						""
						"STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"
		""
		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}
static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo, ~0);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;
	struct r600_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct r600_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct r600_hw_query_params params;
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
		uint32_t buffer_offset;
		uint32_t buffer0_offset;
	} consts;

	if (!rctx->query_result_shader) {
		r600_create_query_result_shader(rctx);
		if (!rctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 256,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}

	rctx->save_qbo_state(&rctx->b, &saved_state);

	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
		 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
		consts.config |= 8 | 256;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset & ~0xff;
		ssbo[0].buffer_size = qbuf->results_end - ssbo[0].buffer_offset;
		consts.buffer0_offset = (params.start_offset & 0xff);
		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset & ~0xff;
			ssbo[2].buffer_size = offset + 8;
			consts.buffer_offset = (offset & 0xff);
		} else
			consts.buffer_offset = 0;

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo, ~0);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, qbuf->buf, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}
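
/* The loop above walks the buffer chain from the newest block to the
 * oldest, launching one single-thread grid per block. Config bit 1 tells a
 * launch to read the summary written by the previous launch (bit 2), and
 * the final launch (the block with no ->previous) stores into the
 * user-supplied resource instead of the scratch summary buffer.
 */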
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  bool condition,
				  enum pipe_render_cond_flag mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;

		if (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
			atom->num_dw *= R600_MAX_STREAMS;
	}

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}
void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}
void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}
/* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
{
	struct r600_common_context *ctx =
		(struct r600_common_context *)rscreen->aux_context;
	struct radeon_cmdbuf *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned i, mask = 0;
	unsigned max_rbs;

	if (ctx->family == CHIP_JUNIPER) {
		/*
		 * Fix for predication lockups - the chip can only ever have
		 * 4 RBs, however it looks like the predication logic assumes
		 * there's 8, trying to read results from query buffers never
		 * written to. By increasing this number we'll write the
		 * status bit for these as per the normal disabled rb logic.
		 */
		ctx->screen->info.num_render_backends = 8;
	}
	max_rbs = ctx->screen->info.num_render_backends;

	assert(rscreen->chip_class <= CAYMAN);

	/*
	 * Check if the backend_map query is supported by the kernel.
	 * Note the kernel drm driver for a long time never filled in the
	 * associated data on eg/cm, only on r600/r700, hence ignore the valid
	 * bit there if the map is zero.
	 * (Albeit some chips with just one active rb can have a valid 0 map.)
	 */
	if (rscreen->info.r600_gb_backend_map_valid &&
	    (ctx->chip_class < EVERGREEN || rscreen->info.r600_gb_backend_map != 0)) {
		unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
		unsigned backend_map = rscreen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			rscreen->info.enabled_rb_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource *)
		pipe_buffer_create(ctx->b.screen, 0,
				   PIPE_USAGE_STAGING, max_rbs * 16);
	if (!buffer)
		return;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, max_rbs * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < max_rbs; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i * 4 + 1])
					mask |= (1 << i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask) {
		if (rscreen->debug_flags & DBG_INFO &&
		    mask != rscreen->info.enabled_rb_mask) {
			printf("enabled_rb_mask (fixed) = 0x%x\n", mask);
		}
		rscreen->info.enabled_rb_mask = mask;
	}
}
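/* Worked example for the backend_map decode above, with hypothetical values:
 * on a pre-EVERGREEN chip (item_width = 2, item_mask = 0x3), num_tile_pipes = 4
 * and backend_map = 0xE4 (binary 11 10 01 00) yield RB indices 0, 1, 2, 3 on
 * successive iterations, i.e. mask = 0xF. */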
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
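/* For reference, X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) expands to:
 *
 *	{ .name = "draw-calls",
 *	  .query_type = R600_QUERY_DRAW_CALLS,
 *	  .type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *	  .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 *	  .group_id = ~(unsigned)0 }
 */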
static const struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
	X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
	X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
	X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
	X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
	X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
	X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
	X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
	X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
	X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
	X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
	X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
	X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
	X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
	X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
	X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
	X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
	X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
	X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
	X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
	X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
	X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
	X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
	X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
	X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
	X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
	X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
	X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else
		return ARRAY_SIZE(r600_driver_query_list) - 25;
}
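/* Only the trailing, DRM-version-gated block of r600_driver_query_list (see
 * the comment above the "GPU-load" entry) is hidden from older kernels, which
 * lack the interfaces those queries sample. */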
static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case R600_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = rscreen->info.vram_vis_size;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}
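/* Callers enumerate the queries in two passes, e.g. (illustrative):
 *
 *	int n = screen->get_driver_query_info(screen, 0, NULL);
 *	for (int i = 0; i < n; i++) {
 *		struct pipe_driver_query_info info;
 *		if (screen->get_driver_query_info(screen, i, &info))
 *			...use info.name, info.type, info.max_value...
 *	}
 *
 * Indices past num_queries fall through to the perfcounter list above. */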
/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}
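/* R600_NUM_SW_QUERY_GROUPS covers only the GPIN group handled above, so any
 * index that survives the range check maps onto it directly. */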
void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen *)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	list_inithead(&rctx->active_queries);
}
void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}