/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_query.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include "tgsi/tgsi_text.h"
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};
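/*
 * A note on the layout above, inferred from r600_get_hw_query_params() below:
 * start_offset/end_offset locate the begin/end snapshots of one result pair
 * within a query buffer slot, and fence_offset locates the dword whose top
 * bit signals that the GPU finished writing the slot. For queries that store
 * one pair per render backend (occlusion queries), pair_stride/pair_count
 * describe how those pairs are tiled within the slot.
 */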
/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;

	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};
static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
				  struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
	FREE(query);
}
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}
static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->begin_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_FB_CACHE_FLUSHES:
		query->begin_result = rctx->num_fb_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->begin_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->begin_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
	case R600_QUERY_GPU_CE_BUSY:
		query->begin_result = r600_begin_counter(rctx->screen,
							 query->b.type);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}
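/*
 * Software queries follow a simple snapshot pattern: begin() records the
 * current value of a driver or winsys counter into begin_result, end()
 * records it into end_result, and get_result() usually reports the
 * difference. For example, a "draw-calls" query active across N draws
 * reports end_result - begin_result == N.
 */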
static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->end_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_FB_CACHE_FLUSHES:
		query->end_result = rctx->num_fb_cache_flushes;
		break;
	case R600_QUERY_NUM_L2_INVALIDATES:
		query->end_result = rctx->num_L2_invalidates;
		break;
	case R600_QUERY_NUM_L2_WRITEBACKS:
		query->end_result = rctx->num_L2_writebacks;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
	case R600_QUERY_GPU_CE_BUSY:
		query->end_result = r600_end_counter(rctx->screen,
						     query->b.type,
						     query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = rctx->last_tex_ps_draw_ratio;
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}
static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, &rctx->b, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_CS_THREAD_BUSY:
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}
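/*
 * Worked example for R600_QUERY_CS_THREAD_BUSY: the winsys counter is the
 * accumulated CS-thread time in nanoseconds, and begin/end_time bracket the
 * measurement window. If the thread was busy for 5,000,000 ns during a
 * 10,000,000 ns window, the query reports (5000000 * 100) / 10000000 = 50,
 * i.e. 50% busy.
 */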
static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};
static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}
void r600_query_hw_destroy(struct r600_common_screen *rscreen,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}
static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 rscreen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, 0,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(rscreen, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}
static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
						    PIPE_TRANSFER_WRITE |
						    PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned max_rbs = rscreen->info.num_render_backends;
		unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * max_rbs;
		}
	}

	return true;
}
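/*
 * Each ZPASS_DONE slot stores four dwords per render backend:
 * {begin_lo, begin_hi, end_lo, end_hi}. Pre-setting bit 31 of the "hi"
 * dwords for backends masked out by enabled_rb_mask makes those pairs look
 * "ready" to r600_query_read_result(), so disabled RBs never stall result
 * collection or corrupt the summed counters.
 */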
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);
static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);
static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};
bool r600_query_hw_init(struct r600_common_screen *rscreen,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rscreen, query);
	if (!query->buffer.buf)
		return false;

	return true;
}
static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rscreen->info.num_render_backends;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rscreen, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}
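/*
 * The result_size values above follow from what the packets write: ZPASS_DONE
 * stores a {begin, end} pair of 64-bit counters per render backend (16 bytes
 * each), SAMPLE_STREAMOUTSTATS stores two 64-bit values per snapshot (32
 * bytes for the begin/end pair), and timestamps are a single 64-bit value
 * per snapshot plus room for the fence dword and alignment.
 */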
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}
static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, 3, NULL, va, 0, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}
static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
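/*
 * Query buffers grow as a linked list: when the current buffer is full, the
 * live "buffer" slot is copied into a freshly malloc'd r600_query_buffer that
 * becomes ->previous, and a new GPU buffer is allocated for subsequent
 * results. Readers therefore walk the chain from the newest buffer backwards.
 */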
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, 3, NULL, va, 0, 0);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (fence_va)
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
					 query->buffer.buf, fence_va, 0, 0x80000000);
}
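/*
 * The trailing EVENT_WRITE_EOP stores 0x80000000 to fence_va once the
 * bottom-of-pipe is reached, i.e. it sets bit 31 of the fence dword for this
 * result slot. Both CPU readers and the result-computing compute shader poll
 * that bit to detect that the GPU has finished writing the end snapshot.
 */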
static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va_base = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			uint64_t va = va_base + results_base;

			if (ctx->chip_class >= GFX9) {
				radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
				radeon_emit(cs, op);
				radeon_emit(cs, va);
				radeon_emit(cs, va >> 32);
			} else {
				radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
				radeon_emit(cs, va);
				radeon_emit(cs, op | ((va >> 32) & 0xFF));
			}
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}
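/*
 * Note that one SET_PREDICATION packet is emitted per result slot in every
 * chained buffer; the CONTINUE bit tells the CP to combine each packet's
 * outcome with the previous ones, so the predicate reflects the whole history
 * of the query rather than just the most recent buffer.
 */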
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_screen *rscreen =
		(struct r600_common_screen *)ctx->screen;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(query_type);

	return r600_query_hw_create(rscreen, query_type, index);
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx->screen, rquery);
}
static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}
void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
	} else {
		if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}
bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}
static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}
bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}
static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	unsigned max_rbs = rctx->screen->info.num_render_backends;

	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = max_rbs * 16;
		params->pair_stride = 16;
		params->pair_count = max_rbs;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
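/*
 * Example: for an occlusion pair, start_index/end_index select the begin and
 * end 64-bit counters (dword indices {0,1} and {2,3}). With test_status_bit,
 * bit 63 doubles as a "value written" flag, seeded by ZPASS_DONE for active
 * RBs or pre-seeded for disabled RBs in r600_query_hw_prepare_buffer(); the
 * subtraction then cancels the flag bit out of the returned delta.
 */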
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	unsigned max_rbs = rscreen->info.num_render_backends;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (rscreen->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}
static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}
static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   boolean wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}
static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}
bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_common_screen *rscreen = rctx->screen;
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rscreen, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
	}
	return true;
}
/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
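/*
 * Rough C sketch of what the TGSI below computes (illustrative only; helper
 * names like read64()/prev_summary are invented, and the real shader also
 * handles the "result not available" and chaining/availability paths):
 *
 *   uint64_t acc = (config & 1) ? prev_summary : 0;
 *   for (i = 0; i < result_count; i++)
 *       for (p = 0; p < pair_count; p++) {
 *           unsigned base = i * result_stride + p * pair_stride;
 *           acc += read64(buf, base + end_offset) - read64(buf, base);
 *       }
 *   if (config & 32) acc = acc * 1000000 / clock_crystal_freq;
 *   if (config & 8)  acc = (acc != 0);
 *   store acc according to config bits 2/4/16/64/128;
 */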
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0..1]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */

		"AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
		/* Check result availability. */
		"LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
		"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
		"MOV TEMP[1], TEMP[0].zzzz\n"
		"NOT TEMP[0].z, TEMP[0].zzzz\n"

		/* Load result if available. */
		"UIF TEMP[1]\n"
		"LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
		"ENDIF\n"
		"ELSE\n"
		/* Load previously accumulated result if requested. */
		"MOV TEMP[0], IMM[0].xxxx\n"
		"AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
		"UIF TEMP[4]\n"
		"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
		"ENDIF\n"

		"MOV TEMP[1].x, IMM[0].xxxx\n"
		"BGNLOOP\n"
		/* Break if accumulated result so far is not available. */
		"UIF TEMP[0].zzzz\n"
		"BRK\n"
		"ENDIF\n"

		/* Break if result_index >= result_count. */
		"USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
		"UIF TEMP[5]\n"
		"BRK\n"
		"ENDIF\n"

		/* Load fence and check result availability */
		"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
		"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
		"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
		"NOT TEMP[0].z, TEMP[0].zzzz\n"
		"UIF TEMP[0].zzzz\n"
		"BRK\n"
		"ENDIF\n"

		"MOV TEMP[1].y, IMM[0].xxxx\n"
		"BGNLOOP\n"
		/* Load start and end. */
		"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
		"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
		"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

		"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
		"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"

		"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
		"U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"

		/* Increment pair index */
		"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
		"USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
		"UIF TEMP[5]\n"
		"BRK\n"
		"ENDIF\n"
		"ENDLOOP\n"

		/* Increment result index */
		"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
		"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
		/* Store accumulated data for chaining. */
		"STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
		"ELSE\n"
		"AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
		"UIF TEMP[4]\n"
		/* Store result availability. */
		"NOT TEMP[0].z, TEMP[0]\n"
		"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
		"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

		"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
		"UIF TEMP[4]\n"
		"STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
		"ENDIF\n"
		"ELSE\n"
		/* Store result if it is available. */
		"NOT TEMP[4], TEMP[0].zzzz\n"
		"UIF TEMP[4]\n"
		/* Apply timestamp conversion */
		"AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
		"UIF TEMP[4]\n"
		"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
		"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
		"ENDIF\n"

		/* Convert to boolean */
		"AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
		"UIF TEMP[4]\n"
		"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
		"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
		"MOV TEMP[0].y, IMM[0].xxxx\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
		"UIF TEMP[4]\n"
		"STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
		"ELSE\n"
		/* Clamping */
		"UIF TEMP[0].yyyy\n"
		"MOV TEMP[0].x, IMM[0].wwww\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
		"UIF TEMP[4]\n"
		"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
		"ENDIF\n"

		"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
		"ENDIF\n"
		"ENDIF\n"
		"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}
static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;
	struct r600_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct r600_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct r600_hw_query_params params;
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
	} consts;

	if (!rctx->query_result_shader) {
		r600_create_query_result_shader(rctx);
		if (!rctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}

	rctx->save_qbo_state(&rctx->b, &saved_state);

	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset;
		ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset;
			ssbo[2].buffer_size = 8;

			((struct r600_resource *)resource)->TC_L2_dirty = true;
		}

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  enum pipe_render_cond_flag mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}
void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}
void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}
/* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
{
	struct r600_common_context *ctx =
		(struct r600_common_context *)rscreen->aux_context;
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned i, mask = 0;
	unsigned max_rbs = ctx->screen->info.num_render_backends;

	assert(rscreen->chip_class <= CAYMAN);

	/* if backend_map query is supported by the kernel */
	if (rscreen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
		unsigned backend_map = rscreen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			rscreen->info.enabled_rb_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource *)
		pipe_buffer_create(ctx->b.screen, 0,
				   PIPE_USAGE_STAGING, max_rbs * 16);
	if (!buffer)
		return;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, max_rbs * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for(i = 0; i < max_rbs; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask)
		rscreen->info.enabled_rb_mask = mask;
}
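/*
 * Example of the backend_map decode above (assuming the 4-bit-per-item
 * encoding shown for EG+): with num_tile_pipes = 4 and backend_map = 0x3210,
 * the loop extracts RB indices 0, 1, 2 and 3 in turn and yields
 * enabled_rb_mask = 0xf.
 */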
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-fb-cache-flushes", NUM_FB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
	X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
	X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use it as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
	X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
	X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
	X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
	X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
	X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
	X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
	X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
	X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
	X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
	X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
	X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
	X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
	X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
	X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
	X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
	X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
	X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
	X("GPU-dma-busy", GPU_DMA_BUSY, UINT64, AVERAGE),
	X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
	X("GPU-ce-busy", GPU_CE_BUSY, UINT64, AVERAGE),
};
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3) {
		if (rscreen->chip_class >= VI)
			return ARRAY_SIZE(r600_driver_query_list);
		else
			return ARRAY_SIZE(r600_driver_query_list) - 7;
	}
	else
		return ARRAY_SIZE(r600_driver_query_list) - 25;
}
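/*
 * The subtracted counts correspond to the tail of r600_driver_query_list:
 * "- 7" drops the last seven GPU-busy counters (GPU-pfp-busy through
 * GPU-ce-busy), and "- 25" drops everything from "temperature" onward,
 * i.e. all 25 queries whose availability depends on a newer DRM.
 */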
static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case R600_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = rscreen->info.vram_vis_size;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}
/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}
void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen *)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}
void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}