/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	screen->fence_reference(screen, &query->fence, NULL);
	FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	default: unreachable("query type does not correspond to winsys id");
	}
}

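/* Software queries sample a counter in begin()/end() and report the
 * difference in get_result(). Counters that are instantaneous readings
 * (VRAM usage, temperature, clocks) use a zero begin_result, so the end()
 * sample is returned as-is. */
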
static boolean r600_query_sw_begin(struct r600_common_context *rctx,
				   struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->begin_result = r600_gpu_load_begin(rctx->screen);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return TRUE;
}

static void r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, 0);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->end_result = r600_gpu_load_end(rctx->screen,
						      query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}
}

static boolean r600_query_sw_get_result(struct r600_common_context *rctx,
					struct r600_query *rquery,
					boolean wait,
					union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.r600_clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return TRUE;
}

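/* A note on the unit conversions above: RADEON_BUFFER_WAIT_TIME_NS is in
 * nanoseconds, so dividing by 1000 yields the microseconds advertised in
 * r600_driver_query_list below. The temperature and clock scaling assumes
 * the winsys reports millidegrees and MHz respectively (consistent with the
 * temperature cap of 125 and the HZ result type in the query list), though
 * the units are not spelled out here. */
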
static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result
};

static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
					       unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_context *rctx,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
	FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
						   struct r600_query_hw *query)
{
	unsigned buf_size = 4096;

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	if (query->ops->prepare_buffer)
		query->ops->prepare_buffer(ctx, query, buf);

	return buf;
}

static void r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	uint32_t *results;

	/* Timer queries write a single value and don't need a cleared buffer. */
	if (query->b.type == PIPE_QUERY_TIME_ELAPSED ||
	    query->b.type == PIPE_QUERY_TIMESTAMP)
		return;

	results = r600_buffer_map_sync_with_rings(ctx, buffer,
						  PIPE_TRANSFER_WRITE);

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
	}
}

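/* Each occlusion snapshot occupies 16 bytes per DB: a 64-bit begin counter
 * and a 64-bit end counter, whose bit 63 is set by the hardware once the
 * value is valid. Pre-setting bit 63 of both halves for backends that are
 * masked out makes r600_query_read_result() treat those slots as ready with
 * a zero contribution, so an idle backend can never stall result collection. */
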
static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

boolean r600_query_hw_init(struct r600_common_context *rctx,
			   struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rctx, query);
	if (!query->buffer.buf)
		return FALSE;

	return TRUE;
}

static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		query->flags = R600_QUERY_HW_FLAG_TIMER;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		query->flags = R600_QUERY_HW_FLAG_TIMER |
			       R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw = 6;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rctx, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

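/* result_size covers both the begin and the end snapshot: 16 bytes per DB
 * for occlusion (two 64-bit counters), 32 bytes for streamout statistics
 * (two NumPrimitivesWritten/PrimitiveStorageNeeded pairs), and 16 bytes per
 * counter for pipeline statistics. num_cs_dw appears to budget one event
 * packet plus its relocation; r600_query_hw_emit_start reserves twice that
 * amount so the matching stop event is guaranteed to fit in the same IB. */
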
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (enable != old_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);
	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	if (query->flags & R600_QUERY_HW_FLAG_TIMER)
		ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
	else
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
}

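/* When the current buffer fills up, the live r600_query_buffer is copied
 * into a freshly allocated node that becomes buffer.previous and a new 4KB
 * buffer takes its place. get_result() and the predication code later walk
 * this chain, so begin/end snapshot pairs never straddle a buffer boundary. */
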
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START)) {
		if (query->flags & R600_QUERY_HW_FLAG_TIMER)
			ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
		else
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
	}

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

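/* One SET_PREDICATION packet is emitted per snapshot in every chained
 * buffer; the CONTINUE bit ORs the visibility outcome of each packet into
 * the previous ones, which is why it is set on every packet after the
 * first. This also matches the 5 dwords per snapshot counted in
 * r600_render_condition() below (presumably 3 packet dwords plus a 2-dword
 * relocation). */
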
static struct pipe_query *r600_create_query(struct pipe_context *ctx,
					    unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(ctx, query_type);

	return r600_query_hw_create(rctx, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

boolean r600_query_hw_begin(struct r600_common_context *rctx,
			    struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return FALSE;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx, query);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	r600_query_hw_emit_start(rctx, query);

	if (query->flags & R600_QUERY_HW_FLAG_TIMER)
		LIST_ADDTAIL(&query->list, &rctx->active_timer_queries);
	else
		LIST_ADDTAIL(&query->list, &rctx->active_nontimer_queries);

	return TRUE;
}

static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->end(rctx, rquery);
}

void r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);
}

static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

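/* The indices are 32-bit word offsets into one snapshot. For example, an
 * occlusion counter calls r600_query_read_result(buffer, 0, 2, true): the
 * begin value sits in dwords 0-1, the end value in dwords 2-3, and the
 * difference is only returned once the hardware has set bit 63 of both
 * values, so a zero return means "not ready yet" for status-checked
 * counters. */
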
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		unsigned results_base = 0;
		while (results_base != query->result_size) {
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		unsigned results_base = 0;
		while (results_base != query->result_size) {
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP: {
		uint32_t *current_result = (uint32_t*)buffer;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

boolean r600_query_hw_get_result(struct r600_common_context *rctx,
				 struct r600_query *rquery,
				 boolean wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return FALSE;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
	}
	return TRUE;
}

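/* r600_clock_crystal_freq is in kHz, i.e. ticks per millisecond (matching
 * the disjoint-frequency computation in r600_query_sw_get_result above), so
 * ticks * 1000000 / freq converts GPU ticks into the nanoseconds that
 * PIPE_QUERY_TIMESTAMP and PIPE_QUERY_TIME_ELAPSED are expected to return. */
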
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

static void r600_suspend_queries(struct r600_common_context *ctx,
				 struct list_head *query_list,
				 unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(*num_cs_dw_queries_suspend == 0);
}

void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
			     &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_suspend_timer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_timer_queries,
			     &ctx->num_cs_dw_timer_queries_suspend);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* Begin + end. */
		num_dw += query->num_cs_dw * 2;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw;
	}

	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;

	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

static void r600_resume_queries(struct r600_common_context *ctx,
				struct list_head *query_list,
				unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);

	assert(*num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_nontimer_queries,
			    &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_resume_timer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_timer_queries,
			    &ctx->num_cs_dw_timer_queries_suspend);
}

/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while(num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for(i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

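/* The fallback path relies on ZPASS_DONE making every enabled DB write its
 * counter pair to the buffer: a live backend sets at least the valid bit in
 * the high dword (results[i*4 + 1]), while slots belonging to disabled
 * backends keep the zeroes written by the memset above. */
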
#define X(name_, query_type_, type_, result_type_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = ~(unsigned)0 \
	}

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations",		NUM_COMPILATIONS,	UINT64, CUMULATIVE),
	X("num-shaders-created",	NUM_SHADERS_CREATED,	UINT64, CUMULATIVE),
	X("draw-calls",			DRAW_CALLS,		UINT64, CUMULATIVE),
	X("requested-VRAM",		REQUESTED_VRAM,		BYTES, AVERAGE),
	X("requested-GTT",		REQUESTED_GTT,		BYTES, AVERAGE),
	X("buffer-wait-time",		BUFFER_WAIT_TIME,	MICROSECONDS, CUMULATIVE),
	X("num-cs-flushes",		NUM_CS_FLUSHES,		UINT64, CUMULATIVE),
	X("num-bytes-moved",		NUM_BYTES_MOVED,	BYTES, CUMULATIVE),
	X("VRAM-usage",			VRAM_USAGE,		BYTES, AVERAGE),
	X("GTT-usage",			GTT_USAGE,		BYTES, AVERAGE),
	X("GPU-load",			GPU_LOAD,		UINT64, AVERAGE),
	X("temperature",		GPU_TEMPERATURE,	UINT64, AVERAGE),
	X("shader-clock",		CURRENT_GPU_SCLK,	HZ, AVERAGE),
	X("memory-clock",		CURRENT_GPU_MCLK,	HZ, AVERAGE),
};

#undef X

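/* As an illustration, X("draw-calls", DRAW_CALLS, UINT64, CUMULATIVE)
 * expands to the designated initializer
 *
 *   { .name = "draw-calls",
 *     .query_type = R600_QUERY_DRAW_CALLS,
 *     .type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *     .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_CUMULATIVE,
 *     .group_id = ~(unsigned)0 }
 *
 * The trailing entries (GPU load, temperature and the clocks) need winsys
 * support, which is why r600_get_num_queries() below trims the list on
 * older kernels. */
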
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return Elements(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return Elements(r600_driver_query_list) - 3;
	else
		return Elements(r600_driver_query_list) - 4;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info)
		return num_queries;

	if (index >= num_queries)
		return 0;

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_nontimer_queries);
	LIST_INITHEAD(&rctx->active_timer_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
}