/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};
/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};
static void r600_query_sw_destroy(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	screen->fence_reference(screen, &query->fence, NULL);
	FREE(query);
}
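
/* Map a driver-specific query type to the winsys counter it is backed by. */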
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_CTX_FLUSHES: return RADEON_NUM_CS_FLUSHES;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	default: unreachable("query type does not correspond to winsys id");
	}
}
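
/* Software queries snapshot a driver or winsys counter at begin time;
 * the reported result is end_result - begin_result. */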
static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CTX_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->begin_result = r600_gpu_load_begin(rctx->screen);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}
static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CTX_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->end_result = r600_gpu_load_end(rctx->screen,
						      query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = rctx->last_tex_ps_draw_ratio;
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}
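
/* Compute the final value of a software query and convert it to the units
 * advertised in r600_driver_query_list. */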
static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, &rctx->b, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000; /* ns -> us, millidegrees -> degrees */
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000; /* MHz -> Hz */
		break;
	}

	return true;
}
static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result
};
static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
					       unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}
void r600_query_hw_destroy(struct r600_common_context *rctx,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}
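
/* Allocate a buffer for query results, at least one GART page in size so
 * that many results can be appended before a new buffer is needed. */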
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 ctx->screen->info.gart_page_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
		if (!query->ops->prepare_buffer(ctx, query, buf)) {
			r600_resource_reference(&buf, NULL);
			return NULL;
		}
	}

	return buf;
}
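
/* Clear a fresh result buffer. For occlusion queries, the top "valid" bits
 * of slots belonging to disabled render backends are pre-set so that result
 * collection never waits on them. */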
static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
						PIPE_TRANSFER_WRITE |
						PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
	}

	return true;
}
static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};
bool r600_query_hw_init(struct r600_common_context *rctx,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rctx, query);
	if (!query->buffer.buf)
		return false;

	return true;
}
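
/* Create a hardware query and set, per type, the result size and the number
 * of command-stream dwords needed by the begin/end packets. */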
static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rctx, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}
static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}
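
/* Emit the packets that make the GPU write the "begin" half of a result pair
 * at the given buffer address. */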
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}
static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
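
/* Emit the packets that write the "end" half of a result pair, plus the
 * fence dwords the CPU polls to detect completed results. */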
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		va += ctx->max_db * 16 - 8;
		r600_gfx_write_fence(ctx, va, 0, 0x80000000);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);

		va += 8;
		r600_gfx_write_fence(ctx, va, 0, 0x80000000);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		va += sample_size;
		r600_gfx_write_fence(ctx, va, 0, 0x80000000);
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}
static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}
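
/* Emit SET_PREDICATION packets covering every result pair in every buffer
 * of the render-condition query. */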
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(ctx, query_type);

	return r600_query_hw_create(rctx, query_type, index);
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}
static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}
void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
		/* Obtain a new buffer if the current one can't be mapped without a stall. */
		if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
			r600_resource_reference(&query->buffer.buf, NULL);
			query->buffer.buf = r600_new_query_buffer(rctx, query);
		} else {
			if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
				r600_resource_reference(&query->buffer.buf, NULL);
		}
	}
}
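
/* Begin a hardware query: discard old buffers unless this begin resumes a
 * suspended query, then emit the begin packets. */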
bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}
static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}
bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}
static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = rctx->max_db * 16;
		params->pair_stride = 16;
		params->pair_count = rctx->max_db;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}
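
/* Read a (start, end) pair of 64-bit counters from a mapped result buffer
 * and return end - start. If test_status_bit is set, the pair only counts
 * once the GPU has written the top "valid" bit of both halves; otherwise
 * 0 is returned. */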
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < ctx->max_db; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		for (unsigned i = 0; i < ctx->max_db; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}
static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}
static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}
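
/* Gather the result of a hardware query by mapping every buffer in the
 * chain and accumulating all start/end pairs written by the GPU. */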
bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
	}
	return true;
}
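
/* pipe_context::render_condition hook: remember the query and mode, and
 * resize the predication atom accordingly. */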
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}
void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}
void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}
/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.num_render_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for(i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
}
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-ctx-flushes", NUM_CTX_FLUSHES, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use it as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return ARRAY_SIZE(r600_driver_query_list) - 3;
	else
		return ARRAY_SIZE(r600_driver_query_list) - 4;
}
static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}
/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}
void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}
void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}