/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_cs.h"
#include "util/u_memory.h"

struct r600_query_buffer {
	/* The buffer where query results are stored. */
	struct r600_resource		*buf;
	/* Offset of the next free result after current query data */
	unsigned			results_end;
	/* If a query buffer is full, a new buffer is created and the old one
	 * is put in here. When we calculate the result, we sum up the samples
	 * from all buffers. */
	struct r600_query_buffer	*previous;
};

struct r600_query {
	/* The query buffer and how many results are in it. */
	struct r600_query_buffer	buffer;
	/* The type of query */
	unsigned			type;
	/* Size of the result in memory for both begin_query and end_query,
	 * this can be one or two numbers, or it could even be a size of a structure. */
	unsigned			result_size;
	/* The number of dwords for begin_query or end_query. */
	unsigned			num_cs_dw;
	/* linked list of queries */
	struct list_head		list;
	/* for custom non-GPU queries */
	uint64_t			begin_result;
	uint64_t			end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle	*fence;
	/* For transform feedback: which stream the query is for */
	unsigned			stream;
};

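/* Timer and non-timer queries are kept on separate active lists and are
 * suspended/resumed independently (see r600_suspend_timer_queries and
 * r600_suspend_nontimer_queries below). */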
static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP;
}

static bool r600_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}

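/* GPU_FINISHED and TIMESTAMP sample state only at end_query, so they have
 * no begin; every other query type brackets the measured range with a
 * begin and an end event. */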
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;

	/* Non-GPU queries. */
	switch (type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		return NULL;
	}

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
	case PIPE_QUERY_TIMESTAMP:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_PIPELINE_STATISTICS:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		break;
	default:
		assert(0);
	}
	return buf;
}

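/* Occlusion results are summed per DB (depth backend): each enabled backend
 * writes {begin, end} pairs of 64-bit counters with bit 63 acting as a
 * "written" flag. r600_new_query_buffer pre-sets that flag for backends
 * missing from ctx->backend_mask so the result loop can treat every slot
 * as complete. */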
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (enable != old_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

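/* Emit the "begin" half of a query result. If the next result would not
 * fit into the current buffer, the buffer is moved to the "previous" chain
 * and a fresh one is allocated first. */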
static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint64_t va;

	r600_update_occlusion_query_state(ctx, query->type, 1);
	r600_update_prims_generated_query_state(ctx, query->type, 1);
	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32UL) & 0xFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_MIN);

	if (r600_is_timer_query(query->type))
		ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
	else
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
}

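/* Emit the "end" half of a query result next to its "begin" half: at +8
 * within each 16-byte DB slot for occlusion queries, and at result_size/2
 * for the other buffered types. */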
static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (!r600_query_needs_begin(query->type)) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
	}

	va = query->buffer.buf->gpu_address;

	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32UL) & 0xFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_MIN);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type)) {
		if (r600_is_timer_query(query->type))
			ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
		else
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
	}

	r600_update_occlusion_query_state(ctx, query->type, -1);
	r600_update_prims_generated_query_state(ctx, query->type, -1);
}

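/* Emit one SET_PREDICATION packet per result slot across all chained
 * buffers. The CONTINUE bit, set on every packet after the first, tells
 * the CP to combine the slots into a single predicate. */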
static void r600_emit_query_predication(struct r600_common_context *ctx, struct r600_query *query,
					int operation, bool flag_wait)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint32_t op = PRED_OP(operation);

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->current_render_cond_cond)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	if (operation == PREDICATION_OP_CLEAR) {
		ctx->need_gfx_cs_space(&ctx->b, 3, FALSE);

		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
		radeon_emit(cs, 0);
		radeon_emit(cs, PRED_OP(PREDICATION_OP_CLEAR));
	} else {
		struct r600_query_buffer *qbuf;
		unsigned count = 0;

		/* Find how many results there are. */
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			count += qbuf->results_end / query->result_size;
		}

		ctx->need_gfx_cs_space(&ctx->b, 5 * count, TRUE);

		op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

		/* emit predicate packets for all data blocks */
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			unsigned results_base = 0;
			uint64_t va = qbuf->buf->gpu_address;

			while (results_base < qbuf->results_end) {
				radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
				radeon_emit(cs, (va + results_base) & 0xFFFFFFFFUL);
				radeon_emit(cs, op | (((va + results_base) >> 32UL) & 0xFF));
				r600_emit_reloc(ctx, &ctx->rings.gfx, qbuf->buf, RADEON_USAGE_READ,
						RADEON_PRIO_MIN);
				results_base += query->result_size;

				/* set CONTINUE bit for all packets except the first */
				op |= PREDICATION_CONTINUE;
			}
		}
	}
}

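/* result_size below is the GPU-visible footprint of one begin/end result
 * pair; num_cs_dw is the command-stream budget that emitting begin or end
 * needs, used by the suspend/resume bookkeeping. */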
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *query;
	bool skip_allocation = false;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw = 6;
		break;
	/* Non-GPU queries and queries not requiring a buffer. */
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		skip_allocation = true;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!skip_allocation) {
		query->buffer.buf = r600_new_query_buffer(rctx, query_type);
		if (!query->buffer.buf) {
			FREE(query);
			return NULL;
		}
	}
	return (struct pipe_query*)query;
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(query);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return false;
	}

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return true;
	case R600_QUERY_DRAW_CALLS:
		rquery->begin_result = rctx->num_draw_calls;
		return true;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->begin_result = 0;
		return true;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return true;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return true;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return true;
	case R600_QUERY_GPU_LOAD:
		rquery->begin_result = r600_gpu_load_begin(rctx->screen);
		return true;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		return true;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return true;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_emit_query_begin(rctx, rquery);

	if (r600_is_timer_query(rquery->type))
		LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
	else
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);

	return true;
}

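/* For the non-GPU (driver/winsys) counters, end_query just takes a second
 * snapshot; r600_get_query_buffer_result later reports
 * end_result - begin_result. */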
static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC, &rquery->fence);
		return;
	case R600_QUERY_DRAW_CALLS:
		rquery->end_result = rctx->num_draw_calls;
		return;
	case R600_QUERY_REQUESTED_VRAM:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
		return;
	case R600_QUERY_REQUESTED_GTT:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
		return;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return;
	case R600_QUERY_VRAM_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_VRAM_USAGE);
		return;
	case R600_QUERY_GTT_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GTT_USAGE);
		return;
	case R600_QUERY_GPU_TEMPERATURE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GPU_TEMPERATURE) / 1000;
		return;
	case R600_QUERY_CURRENT_GPU_SCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_SCLK) * 1000000;
		return;
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_MCLK) * 1000000;
		return;
	case R600_QUERY_GPU_LOAD:
		rquery->end_result = r600_gpu_load_end(rctx->screen, rquery->begin_result);
		return;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->end_result = p_atomic_read(&rctx->screen->num_compilations);
		return;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return;
	}

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type))
		LIST_DELINIT(&rquery->list);
}

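/* Read one {begin, end} counter pair and return the difference. If
 * test_status_bit is set, the pair only counts once bit 63 of both values
 * has been written by the GPU. */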
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

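/* Accumulate the results of a single buffer in the chain into *result.
 * The word indices passed to r600_query_read_result mirror the layout the
 * sampling events write: e.g. the Evergreen pipeline-statistics slot holds
 * 11 begin values followed by 11 end values, hence the constant offset of
 * 22 dwords between the two halves. */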
static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union pipe_query_result *result)
{
	struct pipe_screen *screen = ctx->b.screen;
	unsigned results_base = 0;
	char *map;

	/* Non-GPU queries. */
	switch (query->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)ctx->screen->info.r600_clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED:
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		result->u64 = query->end_result - query->begin_result;
		return TRUE;
	case R600_QUERY_GPU_LOAD:
		result->u64 = query->end_result;
		return TRUE;
	}

	map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
					      PIPE_TRANSFER_READ |
					      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)map;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so_statistics.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so_statistics.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 22, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 24, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 26, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 28, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 30, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 32, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 34, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 36, false);
				result->pipeline_statistics.hs_invocations +=
					r600_query_read_result(map + results_base, 16, 38, false);
				result->pipeline_statistics.ds_invocations +=
					r600_query_read_result(map + results_base, 18, 40, false);
				result->pipeline_statistics.cs_invocations +=
					r600_query_read_result(map + results_base, 20, 42, false);
				results_base += query->result_size;
			}
		} else {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 16, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 18, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 20, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 22, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 24, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 26, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 28, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 30, false);
				results_base += query->result_size;
			}
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}

	return TRUE;
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query,
				     boolean wait, union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *qbuf;

	util_query_clear_result(result, rquery->type);

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
			return FALSE;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
	}
	return TRUE;
}

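/* Hook for pipe_context::render_condition. A NULL query disables
 * predication; otherwise the query type selects between ZPASS and
 * PRIMCOUNT predication. */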
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	bool wait_flag = false;

	rctx->current_render_cond = query;
	rctx->current_render_cond_cond = condition;
	rctx->current_render_cond_mode = mode;

	if (query == NULL) {
		if (rctx->predicate_drawing) {
			rctx->predicate_drawing = false;
			r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
		}
		return;
	}

	if (mode == PIPE_RENDER_COND_WAIT ||
	    mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
		wait_flag = true;
	}

	rctx->predicate_drawing = true;

	switch (rquery->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
		break;
	default:
		assert(0);
	}
}

static void r600_suspend_queries(struct r600_common_context *ctx,
				 struct list_head *query_list,
				 unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(*num_cs_dw_queries_suspend == 0);
}

void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
			     &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_suspend_timer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_timer_queries,
			     &ctx->num_cs_dw_timer_queries_suspend);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw * 2;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw;
	}

	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;

	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

static void r600_resume_queries(struct r600_common_context *ctx,
				struct list_head *query_list,
				unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);

	assert(*num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_begin(ctx, query);
	}
}

void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_nontimer_queries,
			    &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_resume_timer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_timer_queries,
			    &ctx->num_cs_dw_timer_queries_suspend);
}

/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while(num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE, RADEON_PRIO_MIN);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for(i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;

	if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
	    rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_nontimer_queries);
	LIST_INITHEAD(&rctx->active_timer_queries);
}