/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_cs.h"
#include "util/u_memory.h"
struct r600_query_buffer {
	/* The buffer where query results are stored. */
	struct r600_resource		*buf;
	/* Offset of the next free result after current query data */
	unsigned			results_end;
	/* If a query buffer is full, a new buffer is created and the old one
	 * is put in here. When we calculate the result, we sum up the samples
	 * from all buffers. */
	struct r600_query_buffer	*previous;
};
struct r600_query {
	/* The query buffer and how many results are in it. */
	struct r600_query_buffer	buffer;
	/* The type of query */
	unsigned			type;
	/* Size of the result in memory for both begin_query and end_query,
	 * this can be one or two numbers, or it could even be a size of a structure. */
	unsigned			result_size;
	/* The number of dwords for begin_query or end_query. */
	unsigned			num_cs_dw;
	/* linked list of queries */
	struct list_head		list;
	/* for custom non-GPU queries */
	uint64_t			begin_result;
	uint64_t			end_result;
};
static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP ||
	       type == PIPE_QUERY_TIMESTAMP_DISJOINT;
}
static bool r600_query_needs_begin(unsigned type)
{
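	/* Note: GPU_FINISHED and TIMESTAMP queries sample only an end point,
	 * so they have nothing to emit at begin_query time. */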
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;

	/* Non-GPU queries. */
	switch (type) {
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
		return NULL;
	}

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);
	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
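
		/* ZPASS_DONE writes a (begin, end) pair of 64-bit counters per
		 * DB, and the hardware sets bit 63 of each value once it has
		 * been written. Pre-setting that bit for disabled backends
		 * lets the readiness test in r600_query_read_result pass for
		 * slots that will never be written. */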
		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		ctx->ws->buffer_unmap(buf->cs_buf);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
	case PIPE_QUERY_TIMESTAMP:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_PIPELINE_STATISTICS:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		ctx->ws->buffer_unmap(buf->cs_buf);
		break;
	default:
		assert(0);
	}
	return buf;
}
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (enable != old_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}
static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint64_t va;

	r600_update_occlusion_query_state(ctx, query->type, 1);
	r600_update_prims_generated_query_state(ctx, query->type, 1);
	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);
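
	/* Note: twice num_cs_dw is reserved, presumably so that the matching
	 * end_query emission can never run out of space in this CS. */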
	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}
	/* emit begin query */
	va = r600_resource_va(ctx->b.screen, (void*)query->buffer.buf);
	va += query->buffer.results_end;
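
	/* The packets below carry only 8 bits of the upper address dword:
	 * query buffer addresses must fit in a 40-bit GPU virtual address. */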
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32UL) & 0xFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (!ctx->num_pipelinestat_queries) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0));
		}
		ctx->num_pipelinestat_queries++;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_MIN);

	if (!r600_is_timer_query(query->type)) {
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
	}
}
static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (!r600_query_needs_begin(query->type)) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
	}

	va = r600_resource_va(ctx->b.screen, (void*)query->buffer.buf);
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32UL) & 0xFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		assert(ctx->num_pipelinestat_queries > 0);
		ctx->num_pipelinestat_queries--;
		if (!ctx->num_pipelinestat_queries) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_STOP) | EVENT_INDEX(0));
		}
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_MIN);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type)) {
		if (!r600_is_timer_query(query->type)) {
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
		}
	}

	r600_update_occlusion_query_state(ctx, query->type, -1);
	r600_update_prims_generated_query_state(ctx, query->type, -1);
}
static void r600_emit_query_predication(struct r600_common_context *ctx, struct r600_query *query,
					int operation, bool flag_wait)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	if (operation == PREDICATION_OP_CLEAR) {
		ctx->need_gfx_cs_space(&ctx->b, 3, FALSE);

		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
		radeon_emit(cs, 0);
		radeon_emit(cs, PRED_OP(PREDICATION_OP_CLEAR));
	} else {
		struct r600_query_buffer *qbuf;
		unsigned count;
		uint32_t op;

		/* Find how many results there are. */
		count = 0;
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			count += qbuf->results_end / query->result_size;
		}
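
		/* A SET_PREDICATION packet is 3 dwords and its relocation 2,
		 * hence the 5 dwords reserved per result below (accounting
		 * assumed from the emission loop that follows). */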
		ctx->need_gfx_cs_space(&ctx->b, 5 * count, TRUE);

		op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE |
		     (flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW);

		/* emit predicate packets for all data blocks */
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			unsigned results_base = 0;
			uint64_t va = r600_resource_va(ctx->b.screen, &qbuf->buf->b.b);

			while (results_base < qbuf->results_end) {
				radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
				radeon_emit(cs, (va + results_base) & 0xFFFFFFFFUL);
				radeon_emit(cs, op | (((va + results_base) >> 32UL) & 0xFF));
				r600_emit_reloc(ctx, &ctx->rings.gfx, qbuf->buf, RADEON_USAGE_READ,
						RADEON_PRIO_MIN);
				results_base += query->result_size;

				/* set CONTINUE bit for all packets except the first */
				op |= PREDICATION_CONTINUE;
			}
		}
	}
}
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *query;
	bool skip_allocation = false;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
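		/* One 8-byte begin/end counter pair per DB, i.e. 16 bytes per
		 * DB, matching the ZPASS_DONE layout described above. */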
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw = 8;
		break;
	/* Non-GPU queries. */
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
		skip_allocation = true;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!skip_allocation) {
		query->buffer.buf = r600_new_query_buffer(rctx, query_type);
		if (!query->buffer.buf) {
			FREE(query);
			return NULL;
		}
	}
	return (struct pipe_query*)query;
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(query);
}
static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return;
	}

	/* Non-GPU queries. */
	switch (rquery->type) {
	case R600_QUERY_DRAW_CALLS:
		rquery->begin_result = rctx->num_draw_calls;
		return;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
		rquery->begin_result = 0;
		return;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS);
		return;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_emit_query_begin(rctx, rquery);

	if (!r600_is_timer_query(rquery->type)) {
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
	}
}
static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	/* Non-GPU queries. */
	switch (rquery->type) {
	case R600_QUERY_DRAW_CALLS:
		rquery->end_result = rctx->num_draw_calls;
		return;
	case R600_QUERY_REQUESTED_VRAM:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
		return;
	case R600_QUERY_REQUESTED_GTT:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
		return;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS);
		return;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return;
	case R600_QUERY_VRAM_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_VRAM_USAGE);
		return;
	case R600_QUERY_GTT_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GTT_USAGE);
		return;
	}

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type) && !r600_is_timer_query(rquery->type)) {
		LIST_DELINIT(&rquery->list);
	}
}
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;
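
	/* Bit 63 of each value is set once the GPU has written the result;
	 * requiring it on both samples ensures the difference below is only
	 * computed from complete data. */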
	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union pipe_query_result *result)
{
	unsigned results_base = 0;
	char *map;
551 switch (query
->type
) {
552 case R600_QUERY_DRAW_CALLS
:
553 case R600_QUERY_REQUESTED_VRAM
:
554 case R600_QUERY_REQUESTED_GTT
:
555 case R600_QUERY_BUFFER_WAIT_TIME
:
556 case R600_QUERY_NUM_CS_FLUSHES
:
557 case R600_QUERY_NUM_BYTES_MOVED
:
558 case R600_QUERY_VRAM_USAGE
:
559 case R600_QUERY_GTT_USAGE
:
560 result
->u64
= query
->end_result
- query
->begin_result
;
	map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
					      PIPE_TRANSFER_READ |
					      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;
	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)map;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so_statistics.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so_statistics.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
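		/* Begin samples fill the first half of each result slot and
		 * end samples the second half: with 11 u64 counters on EG+,
		 * begin dwords sit at offsets 0..20 and end dwords at 22..42
		 * (8 counters, offsets 0..14 and 16..30, on R600). */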
		if (ctx->chip_class >= EVERGREEN) {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 22, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 24, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 26, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 28, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 30, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 32, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 34, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 36, false);
				result->pipeline_statistics.hs_invocations +=
					r600_query_read_result(map + results_base, 16, 38, false);
				result->pipeline_statistics.ds_invocations +=
					r600_query_read_result(map + results_base, 18, 40, false);
				result->pipeline_statistics.cs_invocations +=
					r600_query_read_result(map + results_base, 20, 42, false);
				results_base += query->result_size;
			}
		} else {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 16, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 18, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 20, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 22, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 24, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 26, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 28, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 30, false);
				results_base += query->result_size;
			}
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}

	ctx->ws->buffer_unmap(qbuf->buf->cs_buf);
	return TRUE;
}
static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query,
				     boolean wait, union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *qbuf;

	util_query_clear_result(result, rquery->type);

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
			return FALSE;
		}
	}

	/* Convert the time to expected units. */
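	/* The counter ticks at the crystal frequency, which the kernel
	 * reports in kHz, so (1000000 * ticks) / freq_kHz yields
	 * nanoseconds. */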
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
	}
	return TRUE;
}
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	bool wait_flag = false;

	rctx->current_render_cond = query;
	rctx->current_render_cond_cond = condition;
	rctx->current_render_cond_mode = mode;

	if (query == NULL) {
		if (rctx->predicate_drawing) {
			rctx->predicate_drawing = false;
			r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
		}
		return;
	}

	if (mode == PIPE_RENDER_COND_WAIT ||
	    mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
		wait_flag = true;
	}

	rctx->predicate_drawing = true;

	switch (rquery->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
		break;
	default:
		assert(0);
	}
}
void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
}
void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_begin(ctx, query);
	}
}
/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;
	uint64_t va;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
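			/* The kernel packs one backend index per tile pipe;
			 * the entry widths below are assumed from the usual
			 * decode: 4 bits on EG+, 2 bits on R600. */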
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}
	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;
	va = r600_resource_va(ctx->b.screen, (void*)buffer);

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->ws->buffer_unmap(buffer->cs_buf);
		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		r600_emit_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE, RADEON_PRIO_MIN);
		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1 << i);
			}
			ctx->ws->buffer_unmap(buffer->cs_buf);
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0)) >> (32 - num_backends);
}
void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;

	if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_nontimer_queries);
}