/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

25 #include "r600_query.h"
27 #include "util/u_memory.h"
struct r600_query_buffer {
	/* The buffer where query results are stored. */
	struct r600_resource		*buf;
	/* Offset of the next free result after current query data */
	unsigned			results_end;
	/* If a query buffer is full, a new buffer is created and the old one
	 * is put in here. When we calculate the result, we sum up the samples
	 * from all buffers. */
	struct r600_query_buffer	*previous;
};

struct r600_query {
	struct r600_query_ops		*ops;

	/* The query buffer and how many results are in it. */
	struct r600_query_buffer	buffer;
	/* The type of query */
	unsigned			type;
	/* Size of the result in memory for both begin_query and end_query,
	 * this can be one or two numbers, or it could even be a size of a structure. */
	unsigned			result_size;
	/* The number of dwords for begin_query or end_query. */
	unsigned			num_cs_dw;
	/* linked list of queries */
	struct list_head		list;
	/* for custom non-GPU queries */
	uint64_t			begin_result;
	uint64_t			end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle	*fence;
	/* For transform feedback: which stream the query is for */
	unsigned			stream;
};

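/* All pipe query entry points dispatch through r600_query_ops, so additional
 * query implementations can be plugged in behind the same interface; the
 * r600_do_* functions declared below form the legacy, buffer-based
 * implementation installed via legacy_query_ops. */
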
static void r600_do_destroy_query(struct r600_common_context *, struct r600_query *);
static boolean r600_do_begin_query(struct r600_common_context *, struct r600_query *);
static void r600_do_end_query(struct r600_common_context *, struct r600_query *);
static boolean r600_do_get_query_result(struct r600_common_context *,
					struct r600_query *, boolean wait,
					union pipe_query_result *result);

static struct r600_query_ops legacy_query_ops = {
	.destroy = r600_do_destroy_query,
	.begin = r600_do_begin_query,
	.end = r600_do_end_query,
	.get_result = r600_do_get_query_result,
};

static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP;
}

static bool r600_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}

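/* Timer and non-timer queries are kept on separate lists and are suspended
 * and resumed independently (see the bottom of this file), because they must
 * be paused around different operations. GPU_FINISHED and TIMESTAMP are
 * end-only: r600_query_needs_begin() excludes them, so they take a single
 * sample at end_query. */
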
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
						   unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;

	/* Non-GPU queries. */
	switch (type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		return NULL;
	}

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern. */
	struct r600_resource *buf = (struct r600_resource *)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
	case PIPE_QUERY_TIMESTAMP:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_PIPELINE_STATISTICS:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		break;
	default:
		assert(0);
	}
	return buf;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (enable != old_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t va;

	r600_update_occlusion_query_state(ctx, query->type, 1);
	r600_update_prims_generated_query_state(ctx, query->type, 1);
	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (r600_is_timer_query(query->type))
		ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
	else
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
}

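/* The end counterpart below writes the second half of each {begin, end}
 * snapshot: at +8 inside each per-DB pair for occlusion queries, and at
 * +result_size/2 for the other buffered types. results_end only advances in
 * r600_emit_query_end(), so a snapshot is counted only once both halves have
 * been queued. */
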
static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (!r600_query_needs_begin(query->type)) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
	}

	va = query->buffer.buf->gpu_address;

	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type)) {
		if (r600_is_timer_query(query->type))
			ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
		else
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
	}

	r600_update_occlusion_query_state(ctx, query->type, -1);
	r600_update_prims_generated_query_state(ctx, query->type, -1);
}

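/* Render condition: rather than reading results back, the GPU evaluates the
 * predicate itself. One SET_PREDICATION packet is emitted per snapshot in
 * every chained buffer, and the CONTINUE bit folds all of them into a single
 * combined predicate. */
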
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query *query = (struct r600_query *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

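/* result_size in r600_create_query() is the room one {begin, end} snapshot
 * needs: occlusion queries use a 16-byte pair per DB (16 * max_db),
 * streamout-statistics queries store two 64-bit counters per half (32 bytes
 * total), and pipeline statistics store 11 (EG) or 8 (R600) 64-bit counters
 * per half. */
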
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *query;
	bool skip_allocation = false;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;
	query->ops = &legacy_query_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw = 6;
		break;
	/* Non-GPU queries and queries not requiring a buffer. */
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		skip_allocation = true;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!skip_allocation) {
		query->buffer.buf = r600_new_query_buffer(rctx, query_type);
		if (!query->buffer.buf) {
			FREE(query);
			return NULL;
		}
	}
	return (struct pipe_query *)query;
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}

static void r600_do_destroy_query(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource **)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource **)&rquery->buffer.buf, NULL);
	FREE(rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

static boolean r600_do_begin_query(struct r600_common_context *rctx,
				   struct r600_query *rquery)
{
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return FALSE;
	}

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return TRUE;
	case R600_QUERY_DRAW_CALLS:
		rquery->begin_result = rctx->num_draw_calls;
		return TRUE;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->begin_result = 0;
		return TRUE;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return TRUE;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return TRUE;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return TRUE;
	case R600_QUERY_GPU_LOAD:
		rquery->begin_result = r600_gpu_load_begin(rctx->screen);
		return TRUE;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		return TRUE;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return TRUE;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource **)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rquery->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource **)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_emit_query_begin(rctx, rquery);

	if (r600_is_timer_query(rquery->type))
		LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
	else
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);

	return TRUE;
}

static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->end(rctx, rquery);
}

static void r600_do_end_query(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &rquery->fence, 0);
		return;
	case R600_QUERY_DRAW_CALLS:
		rquery->end_result = rctx->num_draw_calls;
		return;
	case R600_QUERY_REQUESTED_VRAM:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
		return;
	case R600_QUERY_REQUESTED_GTT:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
		return;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return;
	case R600_QUERY_VRAM_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_VRAM_USAGE);
		return;
	case R600_QUERY_GTT_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GTT_USAGE);
		return;
	case R600_QUERY_GPU_TEMPERATURE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GPU_TEMPERATURE) / 1000;
		return;
	case R600_QUERY_CURRENT_GPU_SCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_SCLK) * 1000000;
		return;
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_MCLK) * 1000000;
		return;
	case R600_QUERY_GPU_LOAD:
		rquery->end_result = r600_gpu_load_end(rctx->screen, rquery->begin_result);
		return;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->end_result = p_atomic_read(&rctx->screen->num_compilations);
		return;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return;
	}

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type))
		LIST_DELINIT(&rquery->list);
}

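/* Each 64-bit counter written by the GPU carries a "written" flag in bit 63;
 * this is the same bit that r600_new_query_buffer() pre-seeds for unused
 * backends. With test_status_bit set, a {begin, end} pair contributes to the
 * result only once both halves have landed. */
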
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

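#if 0 /* Usage sketch, not compiled: sums one ZPASS result block the same way
       * the PIPE_QUERY_OCCLUSION_COUNTER loop below does. The helper name and
       * the bare max_db parameter are illustrative, not part of the driver. */
static uint64_t sum_zpass_block(char *map, unsigned max_db)
{
	uint64_t sum = 0;
	unsigned db;

	/* Each DB owns a 16-byte {begin, end} pair of 64-bit counters:
	 * dwords 0-1 hold begin, dwords 2-3 hold end (written at +8 by the
	 * end event). Pairs seeded for unused backends pass the status-bit
	 * test and contribute zero. */
	for (db = 0; db < max_db; db++)
		sum += r600_query_read_result(map + db * 16, 0, 2, true);
	return sum;
}
#endif
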
static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union pipe_query_result *result)
{
	struct pipe_screen *screen = ctx->b.screen;
	unsigned results_base = 0;
	char *map;

	/* Non-GPU queries. */
	switch (query->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)ctx->screen->info.r600_clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED:
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		result->u64 = query->end_result - query->begin_result;
		return TRUE;
	case R600_QUERY_GPU_LOAD:
		result->u64 = query->end_result;
		return TRUE;
	}

	map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
					      PIPE_TRANSFER_READ |
					      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)map;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so_statistics.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so_statistics.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 22, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 24, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 26, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 28, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 30, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 32, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 34, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 36, false);
				result->pipeline_statistics.hs_invocations +=
					r600_query_read_result(map + results_base, 16, 38, false);
				result->pipeline_statistics.ds_invocations +=
					r600_query_read_result(map + results_base, 18, 40, false);
				result->pipeline_statistics.cs_invocations +=
					r600_query_read_result(map + results_base, 20, 42, false);
				results_base += query->result_size;
			}
		} else {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 16, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 18, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 20, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 22, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 24, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 26, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 28, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 30, false);
				results_base += query->result_size;
			}
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}

	return TRUE;
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

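/* Results are accumulated across the entire chain of query buffers. Time
 * values are converted from crystal-clock ticks to the nanoseconds state
 * trackers expect: the crystal frequency is reported in kHz, as the
 * TIMESTAMP_DISJOINT conversion above implies, hence the 1000000 factor
 * below. */
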
static boolean r600_do_get_query_result(struct r600_common_context *rctx,
					struct r600_query *rquery,
					boolean wait, union pipe_query_result *result)
{
	struct r600_query_buffer *qbuf;

	util_query_clear_result(result, rquery->type);

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
			return FALSE;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
	}
	return TRUE;
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

static void r600_suspend_queries(struct r600_common_context *ctx,
				 struct list_head *query_list,
				 unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(*num_cs_dw_queries_suspend == 0);
}

void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
			     &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_suspend_timer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_timer_queries,
			     &ctx->num_cs_dw_timer_queries_suspend);
}

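/* Suspend emits an end for every active query and resume re-emits a begin,
 * so queries can be paused around work that must not overlap them. The CS
 * space for all the begins is reserved up front below, since a flush in the
 * middle of resuming would lose the suspended state. */
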
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw * 2;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed. */
		num_dw += query->num_cs_dw;
	}

	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;

	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

static void r600_resume_queries(struct r600_common_context *ctx,
				struct list_head *query_list,
				unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);

	assert(*num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_begin(ctx, query);
	}
}

void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_nontimer_queries,
			    &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_resume_timer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_timer_queries,
			    &ctx->num_cs_dw_timer_queries_suspend);
}

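/* If the kernel cannot report the backend map, probe it: zero a buffer, emit
 * ZPASS_DONE, and check which per-DB slots the hardware actually wrote (the
 * status bit in the high dword). As a last resort, assume the lowest
 * num_backends backends are present. */
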
/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while(num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource *)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for(i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	pipe_resource_reference((struct pipe_resource **)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

#define X(name_, query_type_, type_, result_type_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = ~(unsigned)0 \
	}

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, CUMULATIVE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, CUMULATIVE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X

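/* The list above is ordered so that entries needing newer kernel interfaces
 * sit at the end: the last four (GPU-load, temperature, shader-clock,
 * memory-clock) require radeon DRM 2.42+, and amdgpu (DRM major 3) exposes
 * everything except the last three. */
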
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return Elements(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return Elements(r600_driver_query_list) - 3;
	else
		return Elements(r600_driver_query_list) - 4;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info)
		return num_queries;

	if (index >= num_queries)
		return 0;

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen *)rctx->b.screen)->info.r600_num_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_nontimer_queries);
	LIST_INITHEAD(&rctx->active_timer_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
}