/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_cs.h"
#include "util/u_memory.h"
struct r600_query_buffer {
	/* The buffer where query results are stored. */
	struct r600_resource		*buf;
	/* Offset of the next free result after current query data */
	unsigned			results_end;
	/* If a query buffer is full, a new buffer is created and the old one
	 * is put in here. When we calculate the result, we sum up the samples
	 * from all buffers. */
	struct r600_query_buffer	*previous;
};
struct r600_query {
	/* The query buffer and how many results are in it. */
	struct r600_query_buffer	buffer;
	/* The type of query */
	unsigned			type;
	/* Size of the result in memory for both begin_query and end_query,
	 * this can be one or two numbers, or it could even be a size of a structure. */
	unsigned			result_size;
	/* The number of dwords for begin_query or end_query. */
	unsigned			num_cs_dw;
	/* linked list of queries */
	struct list_head		list;
	/* for custom non-GPU queries */
	uint64_t			begin_result;
	uint64_t			end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle	*fence;
};
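
/* Timer queries (time elapsed, timestamps) are never suspended around
 * context flushes; everything else goes on the nontimer list below and is
 * re-emitted on resume. */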
static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP;
}
static bool r600_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}
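
/* Allocate a 4 KB staging buffer for GPU-written query results. Software
 * queries never reach the GPU and get no buffer; occlusion buffers are
 * pre-initialized so that slots belonging to disabled backends still pass
 * the status-bit test at readback time. */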
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;

	/* Non-GPU queries. */
	switch (type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
		return NULL;
	}

	/* Queries are normally read by the CPU after
	 * being written by the gpu, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		ctx->ws->buffer_unmap(buf->cs_buf);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
	case PIPE_QUERY_TIMESTAMP:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_PIPELINE_STATISTICS:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		ctx->ws->buffer_unmap(buf->cs_buf);
		break;
	}
	return buf;
}
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (enable != old_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}
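
/* Take the "begin" snapshot of a query: pick (or grow to) a result slot in
 * the query buffer and ask the GPU to write the first half of the
 * begin/end pair there. The matching r600_emit_query_end() writes the
 * second half; readback subtracts the two. */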
static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint64_t va;

	r600_update_occlusion_query_state(ctx, query->type, 1);
	r600_update_prims_generated_query_state(ctx, query->type, 1);
	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32UL) & 0xFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_MIN);

	if (!r600_is_timer_query(query->type)) {
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
	}
}
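
/* Take the "end" snapshot. results_end only advances here, so a begin/end
 * pair always occupies a single result_size slot: the end half lands 8
 * bytes into each 16-byte DB slot for occlusion, and at result_size/2 for
 * the other buffered query types. */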
static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (!r600_query_needs_begin(query->type)) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
	}

	va = query->buffer.buf->gpu_address;

	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32UL) & 0xFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_MIN);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type)) {
		if (!r600_is_timer_query(query->type)) {
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
		}
	}

	r600_update_occlusion_query_state(ctx, query->type, -1);
	r600_update_prims_generated_query_state(ctx, query->type, -1);
}
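
/* Program SET_PREDICATION from the accumulated results. Each result slot
 * gets its own packet; the CONTINUE bit on every packet after the first
 * appears to make the hardware accumulate the visibility test across all
 * slots and all buffers in the chain. */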
static void r600_emit_query_predication(struct r600_common_context *ctx, struct r600_query *query,
					int operation, bool flag_wait)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	if (operation == PREDICATION_OP_CLEAR) {
		ctx->need_gfx_cs_space(&ctx->b, 3, FALSE);

		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
		radeon_emit(cs, 0);
		radeon_emit(cs, PRED_OP(PREDICATION_OP_CLEAR));
	} else {
		struct r600_query_buffer *qbuf;
		unsigned count;
		uint32_t op;

		/* Find how many results there are. */
		count = 0;
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			count += qbuf->results_end / query->result_size;
		}

		ctx->need_gfx_cs_space(&ctx->b, 5 * count, TRUE);

		op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE |
				(flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW);

		/* emit predicate packets for all data blocks */
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			unsigned results_base = 0;
			uint64_t va = qbuf->buf->gpu_address;

			while (results_base < qbuf->results_end) {
				radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
				radeon_emit(cs, (va + results_base) & 0xFFFFFFFFUL);
				radeon_emit(cs, op | (((va + results_base) >> 32UL) & 0xFF));
				r600_emit_reloc(ctx, &ctx->rings.gfx, qbuf->buf, RADEON_USAGE_READ,
						RADEON_PRIO_MIN);
				results_base += query->result_size;

				/* set CONTINUE bit for all packets except the first */
				op |= PREDICATION_CONTINUE;
			}
		}
	}
}
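
/* result_size below is in bytes of GPU-visible storage: each hardware
 * counter is written as a begin/end pair of 64-bit values, e.g. 16 bytes
 * per DB for occlusion and 16 bytes per counter for pipeline statistics. */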
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *query;
	bool skip_allocation = false;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw = 6;
		break;
	/* Non-GPU queries and queries not requiring a buffer. */
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
		skip_allocation = true;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!skip_allocation) {
		query->buffer.buf = r600_new_query_buffer(rctx, query_type);
		if (!query->buffer.buf) {
			FREE(query);
			return NULL;
		}
	}
	return (struct pipe_query *)query;
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(query);
}
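
/* Software queries just record a CPU-side counter here and diff it in
 * end_query; GPU queries reset the buffer chain and emit the begin
 * snapshot. */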
static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return false;
	}

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return true;
	case R600_QUERY_DRAW_CALLS:
		rquery->begin_result = rctx->num_draw_calls;
		return true;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->begin_result = 0;
		return true;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS);
		return true;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return true;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return true;
	case R600_QUERY_GPU_LOAD:
		rquery->begin_result = r600_gpu_load_begin(rctx->screen);
		return true;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_emit_query_begin(rctx, rquery);

	if (!r600_is_timer_query(rquery->type)) {
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
	}
	return true;
}
static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC, &rquery->fence);
		return;
	case R600_QUERY_DRAW_CALLS:
		rquery->end_result = rctx->num_draw_calls;
		return;
	case R600_QUERY_REQUESTED_VRAM:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
		return;
	case R600_QUERY_REQUESTED_GTT:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
		return;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS);
		return;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return;
	case R600_QUERY_VRAM_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_VRAM_USAGE);
		return;
	case R600_QUERY_GTT_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GTT_USAGE);
		return;
	case R600_QUERY_GPU_TEMPERATURE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GPU_TEMPERATURE) / 1000;
		return;
	case R600_QUERY_CURRENT_GPU_SCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_SCLK) * 1000000;
		return;
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_MCLK) * 1000000;
		return;
	case R600_QUERY_GPU_LOAD:
		rquery->end_result = r600_gpu_load_end(rctx->screen, rquery->begin_result);
		return;
	}

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type) && !r600_is_timer_query(rquery->type)) {
		LIST_DELINIT(&rquery->list);
	}
}
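
/* Read one 64-bit counter pair from a mapped result slot and return
 * end - start. The top bit of each value doubles as a status flag (it is
 * written once the counter has landed, and preset above for unused
 * backends), so test_status_bit filters out slots that never arrived. */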
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
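
/* Accumulate the results stored in one buffer of the chain into *result.
 * Software queries are answered directly; GPU queries map the buffer
 * (optionally without blocking) and walk every result_size slot. */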
static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union pipe_query_result *result)
{
	struct pipe_screen *screen = ctx->b.screen;
	unsigned results_base = 0;
	char *map;

	/* Non-GPU queries. */
	switch (query->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)ctx->screen->info.r600_clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED:
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 = query->end_result - query->begin_result;
		return TRUE;
	case R600_QUERY_GPU_LOAD:
		result->u64 = query->end_result;
		return TRUE;
	}

	map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
					      PIPE_TRANSFER_READ |
					      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)map;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so_statistics.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so_statistics.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 22, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 24, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 26, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 28, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 30, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 32, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 34, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 36, false);
				result->pipeline_statistics.hs_invocations +=
					r600_query_read_result(map + results_base, 16, 38, false);
				result->pipeline_statistics.ds_invocations +=
					r600_query_read_result(map + results_base, 18, 40, false);
				result->pipeline_statistics.cs_invocations +=
					r600_query_read_result(map + results_base, 20, 42, false);
				results_base += query->result_size;
			}
		} else {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 16, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 18, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 20, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 22, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 24, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 26, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 28, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 30, false);
				results_base += query->result_size;
			}
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}

	ctx->ws->buffer_unmap(qbuf->buf->cs_buf);
	return TRUE;
}
static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query,
				     boolean wait, union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *qbuf;

	util_query_clear_result(result, rquery->type);

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
			return FALSE;
		}
	}

	/* Convert the time to expected units. The crystal frequency is in kHz
	 * (cycles per millisecond), so ticks * 1000000 / freq yields ns. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
	}
	return TRUE;
}
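
/* Route render-condition through hardware predication where possible:
 * occlusion queries use the ZPASS predication op, streamout queries the
 * PRIMCOUNT op; a NULL query clears any active predicate. */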
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	bool wait_flag = false;

	rctx->current_render_cond = query;
	rctx->current_render_cond_cond = condition;
	rctx->current_render_cond_mode = mode;

	if (query == NULL) {
		if (rctx->predicate_drawing) {
			rctx->predicate_drawing = false;
			r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
		}
		return;
	}

	if (mode == PIPE_RENDER_COND_WAIT ||
	    mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
		wait_flag = true;
	}

	rctx->predicate_drawing = true;

	switch (rquery->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
		break;
	default:
		assert(0);
	}
}
void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
}
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx)
{
	struct r600_query *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		/* begin + end */
		num_dw += query->num_cs_dw * 2;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw;
	}

	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;

	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}
void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b,
			       r600_queries_num_cs_dw_for_resuming(ctx), TRUE);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_begin(ctx, query);
	}
}
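
/* Derive the mask of usable DBs: either decode the kernel-provided
 * backend_map, or measure it by emitting a ZPASS_DONE event and checking
 * which DBs wrote their result slot; as a last resort, set the low
 * num_backends bits. */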
/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while(num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->ws->buffer_unmap(buffer->cs_buf);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE, RADEON_PRIO_MIN);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for(i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
			ctx->ws->buffer_unmap(buffer->cs_buf);
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}
void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;

	if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
	    rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_nontimer_queries);
}