/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"

#include "util/u_memory.h"

struct r600_query_buffer {
	/* The buffer where query results are stored. */
	struct r600_resource		*buf;
	/* Offset of the next free result after current query data */
	unsigned			results_end;
	/* If a query buffer is full, a new buffer is created and the old one
	 * is put in here. When we calculate the result, we sum up the samples
	 * from all buffers. */
	struct r600_query_buffer	*previous;
};

struct r600_query {
	/* The query buffer and how many results are in it. */
	struct r600_query_buffer	buffer;
	/* The type of query */
	unsigned			type;
	/* Size of the result in memory for both begin_query and end_query,
	 * this can be one or two numbers, or it could even be a size of a structure. */
	unsigned			result_size;
	/* The number of dwords for begin_query or end_query. */
	unsigned			num_cs_dw;
	/* linked list of queries */
	struct list_head		list;
	/* for custom non-GPU queries */
	uint64_t			begin_result;
	uint64_t			end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle	*fence;
	/* For transform feedback: which stream the query is for */
	unsigned			stream;
};

static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP;
}

static bool r600_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}

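/* Note on query classification (derived from the code below): GPU_FINISHED
 * and TIMESTAMP are "end-only" queries with no begin event, which is why
 * r600_query_needs_begin() excludes them. Independently of that, timer
 * queries (TIME_ELAPSED, TIMESTAMP) are kept on a separate active list from
 * non-timer queries, so each group can be suspended and resumed around
 * command-stream flushes on its own. */
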
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;

	/* Non-GPU queries. */
	switch (type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		return NULL;
	}
	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);
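	/* Layout note (inferred from the initialization below and from
	 * r600_query_read_result): for occlusion queries, each enabled DB
	 * writes a pair of 64-bit ZPASS counters at a 16-byte stride, the
	 * begin snapshot at offset 0 and the end snapshot at offset 8 of its
	 * slot. Bit 63 of each counter doubles as a "result written" flag,
	 * which is why unused backends get 0x80000000 pre-set in their high
	 * dwords: the CPU would otherwise wait forever for results that no
	 * DB will ever write. */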
	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
	case PIPE_QUERY_TIMESTAMP:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_PIPELINE_STATISTICS:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		break;
	default:
		assert(0);
	}
	return buf;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (enable != old_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t va;

	r600_update_occlusion_query_state(ctx, query->type, 1);
	r600_update_prims_generated_query_state(ctx, query->type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}
	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
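	/* The EVENT_WRITE_EOP packet used below differs from plain
	 * EVENT_WRITE: it fires at end-of-pipe and carries its own data
	 * payload. The (3 << 29) in the address-high dword is, as far as the
	 * packet format goes, the DATA_SEL field selecting a 64-bit GPU
	 * timestamp as that payload; the two trailing zero dwords are the
	 * packet's unused immediate-data slots. */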
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (r600_is_timer_query(query->type))
		ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
	else
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
}

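/* Note: num_cs_dw_{timer,nontimer}_queries_suspend, updated above and in
 * r600_emit_query_end(), track how many command-stream dwords it would take
 * to suspend every active query of each class. need_gfx_cs_space() consults
 * these counters so a flush is never started with too little room left to
 * emit all the pending end-of-query events. */
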
static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (!r600_query_needs_begin(query->type)) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
	}

	va = query->buffer.buf->gpu_address;

	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type)) {
		if (r600_is_timer_query(query->type))
			ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
		else
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
	}

	r600_update_occlusion_query_state(ctx, query->type, -1);
	r600_update_prims_generated_query_state(ctx, query->type, -1);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query *query = (struct r600_query*)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
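	/* Each SET_PREDICATION packet below points the predication unit at
	 * one result block. The CONTINUE bit, OR'ed into every packet after
	 * the first, appears to make the hardware accumulate that block into
	 * the running predicate instead of replacing it, so a query whose
	 * results span several buffers still yields one combined test. */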
	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *query;
	bool skip_allocation = false;

	query = CALLOC_STRUCT(r600_query);
	if (!query)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw = 6;
		break;
	/* Non-GPU queries and queries not requiring a buffer. */
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		skip_allocation = true;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!skip_allocation) {
		query->buffer.buf = r600_new_query_buffer(rctx, query_type);
		if (!query->buffer.buf) {
			FREE(query);
			return NULL;
		}
	}
	return (struct pipe_query*)query;
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(query);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return false;
	}

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return true;
	case R600_QUERY_DRAW_CALLS:
		rquery->begin_result = rctx->num_draw_calls;
		return true;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->begin_result = 0;
		return true;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return true;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return true;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return true;
	case R600_QUERY_GPU_LOAD:
		rquery->begin_result = r600_gpu_load_begin(rctx->screen);
		return true;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		return true;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return true;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rquery->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_emit_query_begin(rctx, rquery);

	if (r600_is_timer_query(rquery->type))
		LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
	else
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);

	return true;
}

static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return;
	case PIPE_QUERY_GPU_FINISHED:
		ctx->flush(ctx, &rquery->fence, 0);
		return;
	case R600_QUERY_DRAW_CALLS:
		rquery->end_result = rctx->num_draw_calls;
		return;
	case R600_QUERY_REQUESTED_VRAM:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
		return;
	case R600_QUERY_REQUESTED_GTT:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
		return;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return;
	case R600_QUERY_VRAM_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_VRAM_USAGE);
		return;
	case R600_QUERY_GTT_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GTT_USAGE);
		return;
	case R600_QUERY_GPU_TEMPERATURE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GPU_TEMPERATURE) / 1000;
		return;
	case R600_QUERY_CURRENT_GPU_SCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_SCLK) * 1000000;
		return;
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_MCLK) * 1000000;
		return;
	case R600_QUERY_GPU_LOAD:
		rquery->end_result = r600_gpu_load_end(rctx->screen, rquery->begin_result);
		return;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->end_result = p_atomic_read(&rctx->screen->num_compilations);
		return;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return;
	}

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type))
		LIST_DELINIT(&rquery->list);
}

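/* Note on the result encoding handled below: for most GPU queries the
 * hardware writes a begin snapshot and an end snapshot of a free-running
 * counter, and the CPU computes end - start. When test_status_bit is set,
 * bit 63 of each snapshot is the "value written" flag described above, so
 * a pair only contributes once both snapshots have actually landed in
 * memory; until then the block contributes 0 and the caller may keep
 * waiting. */
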
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union pipe_query_result *result)
{
	struct pipe_screen *screen = ctx->b.screen;
	unsigned results_base = 0;
	char *map;

	/* Non-GPU queries. */
	switch (query->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)ctx->screen->info.r600_clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED:
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		result->u64 = query->end_result - query->begin_result;
		return TRUE;
	case R600_QUERY_GPU_LOAD:
		result->u64 = query->end_result;
		return TRUE;
	}

	map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
					      PIPE_TRANSFER_READ |
					      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;
	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)map;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so_statistics.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so_statistics.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
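	/* SAMPLE_PIPELINESTAT layout, reconstructed from the dword offsets
	 * used below: the begin snapshot is a packed array of 64-bit
	 * counters (PS invocations, clipper primitives, clipper invocations,
	 * VS invocations, GS invocations, GS primitives, IA primitives, IA
	 * vertices, then HS, DS and CS invocations on Evergreen only), and
	 * the end snapshot follows at result_size/2, i.e. at dword 22 on
	 * Evergreen and dword 16 on R600. */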
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 22, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 24, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 26, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 28, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 30, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 32, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 34, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 36, false);
				result->pipeline_statistics.hs_invocations +=
					r600_query_read_result(map + results_base, 16, 38, false);
				result->pipeline_statistics.ds_invocations +=
					r600_query_read_result(map + results_base, 18, 40, false);
				result->pipeline_statistics.cs_invocations +=
					r600_query_read_result(map + results_base, 20, 42, false);
				results_base += query->result_size;
			}
		} else {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 16, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 18, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 20, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 22, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 24, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 26, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 28, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 30, false);
				results_base += query->result_size;
			}
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}

	return TRUE;
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query,
				     boolean wait, union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *qbuf;

	util_query_clear_result(result, rquery->type);

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
			return FALSE;
		}
	}

	/* Convert the time to expected units. r600_clock_crystal_freq is in
	 * kHz (cycles per millisecond, see the TIMESTAMP_DISJOINT case above),
	 * so ticks * 1000000 / freq yields nanoseconds. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
	}

	return TRUE;
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

static void r600_suspend_queries(struct r600_common_context *ctx,
				 struct list_head *query_list,
				 unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(*num_cs_dw_queries_suspend == 0);
}

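/* Suspend/resume exists because query counters must not span a command-
 * stream flush: before a flush, every active query gets an end event
 * emitted (suspend), and after the new CS starts, a fresh begin event is
 * emitted at the next free result slot (resume). The partial results are
 * summed at read time, so the query still appears continuous to the API. */
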
void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
			     &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_suspend_timer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_timer_queries,
			     &ctx->num_cs_dw_timer_queries_suspend);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw * 2;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw;
	}

	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;

	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

static void r600_resume_queries(struct r600_common_context *ctx,
				struct list_head *query_list,
				unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);

	assert(*num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_begin(ctx, query);
	}
}

void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_nontimer_queries,
			    &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_resume_timer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_timer_queries,
			    &ctx->num_cs_dw_timer_queries_suspend);
}

/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}
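	/* Probe idea for the fallback below: emit one ZPASS_DONE event into
	 * a zero-initialized buffer and see which per-DB slots get written.
	 * Any backend that exists will set at least the top bit of its
	 * counter (the "value written" flag), so a nonzero high dword
	 * identifies a present backend even when the kernel can't report
	 * the backend map. */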
	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1 << i);
			}
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
}

#define X(name_, query_type_, type_, result_type_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = ~(unsigned)0 \
	}

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations",		NUM_COMPILATIONS,	UINT64, CUMULATIVE),
	X("num-shaders-created",	NUM_SHADERS_CREATED,	UINT64, CUMULATIVE),
	X("draw-calls",			DRAW_CALLS,		UINT64, CUMULATIVE),
	X("requested-VRAM",		REQUESTED_VRAM,		BYTES, AVERAGE),
	X("requested-GTT",		REQUESTED_GTT,		BYTES, AVERAGE),
	X("buffer-wait-time",		BUFFER_WAIT_TIME,	MICROSECONDS, CUMULATIVE),
	X("num-cs-flushes",		NUM_CS_FLUSHES,		UINT64, CUMULATIVE),
	X("num-bytes-moved",		NUM_BYTES_MOVED,	BYTES, CUMULATIVE),
	X("VRAM-usage",			VRAM_USAGE,		BYTES, AVERAGE),
	X("GTT-usage",			GTT_USAGE,		BYTES, AVERAGE),
	X("GPU-load",			GPU_LOAD,		UINT64, AVERAGE),
	X("temperature",		GPU_TEMPERATURE,	UINT64, AVERAGE),
	X("shader-clock",		CURRENT_GPU_SCLK,	HZ, AVERAGE),
	X("memory-clock",		CURRENT_GPU_MCLK,	HZ, AVERAGE),
};

#undef X

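/* The ordering of the list above matters: the last four entries (GPU-load,
 * temperature, shader-clock, memory-clock) depend on kernel support, and
 * r600_get_num_queries() below exposes a shorter prefix of the list when
 * that support is missing. */
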
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return Elements(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return Elements(r600_driver_query_list) - 3;
	else
		return Elements(r600_driver_query_list) - 4;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info)
		return num_queries;

	if (index >= num_queries)
		return 0;

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125; /* degrees Celsius */
		break;
	}

	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_nontimer_queries);
	LIST_INITHEAD(&rctx->active_timer_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
}