/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	screen->fence_reference(screen, &query->fence, NULL);
	FREE(query);
}

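/* Map a driver-specific query type onto the winsys counter that backs it;
 * the value itself is read via rctx->ws->query_value(). */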
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	default: unreachable("query type does not correspond to winsys id");
	}
}

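/* Software queries work by snapshotting a counter in begin() and again in
 * end(); get_result() then reports end_result - begin_result. The GPIN
 * queries are constants and are answered directly in get_result(). */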
static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->begin_result = r600_gpu_load_begin(rctx->screen);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, 0);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->end_result = r600_gpu_load_end(rctx->screen,
						      query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = rctx->last_tex_ps_draw_ratio;
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000; /* ns to us, millidegrees to degrees */
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}

static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result
};

static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
					       unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_context *rctx,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}

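/* Allocate a results buffer. The size is rounded up to a GART page so that
 * many begin/end snapshots fit into one buffer before a new one has to be
 * chained in by r600_query_hw_emit_start(). */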
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 ctx->screen->info.gart_page_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
		if (!query->ops->prepare_buffer(ctx, query, buf)) {
			r600_resource_reference(&buf, NULL);
			return NULL;
		}
	}

	return buf;
}

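/* Zero a new results buffer and pre-mark the slots of disabled render
 * backends: ZPASS_DONE only writes results for enabled backends, so the
 * "results valid" top bits checked by r600_query_read_result() are set by
 * hand for backends that will never report. */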
static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
						PIPE_TRANSFER_WRITE |
						PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
	}

	return true;
}

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_context *rctx,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rctx, query);
	if (!query->buffer.buf)
		return false;

	return true;
}

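/* result_size is the size of one begin/end result pair in the buffer;
 * num_cs_dw_begin/end reserve command-stream space for the begin and end
 * packets of each query type. */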
static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw_end = 8;
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rctx, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

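/* Emit the packets that write the "begin" half of a result pair at va.
 * Occlusion, streamout and pipeline statistics use EVENT_WRITE sampling
 * events; timing queries use a bottom-of-pipe EVENT_WRITE_EOP timestamp. */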
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

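/* Emit the packets that write the "end" half of a result pair. End results
 * are stored in the second half of each snapshot (va += result_size/2, or
 * va += 8 within the per-DB occlusion layout), which is what
 * r600_query_read_result() pairs with the begin values. */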
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

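/* Emit SET_PREDICATION packets covering every snapshot of the current
 * render-condition query. Every packet after the first carries the
 * CONTINUE bit, so the partial results are combined into one predicate. */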
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(ctx, query_type);

	return r600_query_hw_create(rctx, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
		/* Obtain a new buffer if the current one can't be mapped without a stall. */
		if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
			r600_resource_reference(&query->buffer.buf, NULL);
			query->buffer.buf = r600_new_query_buffer(rctx, query);
		} else {
			if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
				r600_resource_reference(&query->buffer.buf, NULL);
		}
	}
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

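/* Read one begin/end pair of 64-bit values and return the difference.
 * With test_status_bit, bit 31 of the high dwords must be set in both
 * halves (the hardware marks completed slots this way); otherwise the
 * slot is ignored and 0 is returned. */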
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		unsigned results_base = 0;
		while (results_base != query->result_size) {
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		unsigned results_base = 0;
		while (results_base != query->result_size) {
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP: {
		uint32_t *current_result = (uint32_t*)buffer;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
	}
	return true;
}

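/* pipe_context::render_condition hook. The SET_PREDICATION packets are
 * emitted lazily via rctx->render_cond_atom; here we only record the
 * condition and compute the atom's size. */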
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

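/* Derive the mask of usable render backends. Newer kernels report the
 * backend map directly; otherwise a ZPASS_DONE event is written into a
 * scratch buffer and the slots actually written by the hardware reveal
 * which backends exist. */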
/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.num_render_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for(i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
}

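/* Table of driver-specific queries exposed via
 * pipe_screen::get_driver_query_info. XFULL spells out one entry; X is the
 * groupless shorthand and XG assigns one of the R600_QUERY_GROUP_* groups. */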
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use it as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return ARRAY_SIZE(r600_driver_query_list) - 3;
	else
		return ARRAY_SIZE(r600_driver_query_list) - 4;
}

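/* Enumerate driver queries first, then perfcounters. Following the gallium
 * convention, a NULL info pointer asks for the total query count. */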
static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}