/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#include "tgsi/tgsi_text.h"
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};
/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};
static void r600_query_sw_destroy(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	screen->fence_reference(screen, &query->fence, NULL);
	FREE(query);
}
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_CTX_FLUSHES: return RADEON_NUM_CS_FLUSHES;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	default: unreachable("query type does not correspond to winsys id");
	}
}
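
/* Note: the winsys values above are sampled through rctx->ws->query_value()
 * at both query begin and end. Since those counters are cumulative,
 * r600_query_sw_get_result reports the end - begin difference; the
 * usage/clock/temperature style queries zero begin_result so the raw
 * sampled value is returned instead. */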
static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CTX_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->begin_result = r600_gpu_load_begin(rctx->screen);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}
static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CTX_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->end_result = r600_gpu_load_end(rctx->screen,
						      query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = rctx->last_tex_ps_draw_ratio;
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}
static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, &rctx->b, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}
static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};
static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
					       unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}
void r600_query_hw_destroy(struct r600_common_context *rctx,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 ctx->screen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(ctx, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}
static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
						PIPE_TRANSFER_WRITE |
						PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
	}

	return true;
}
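
/* Note: DBs masked out of ctx->backend_mask never write ZPASS results, so
 * the loop above pre-sets the availability bit (0x80000000) on the high
 * dword of their begin/end slots. r600_query_read_result then treats those
 * pairs as valid zero-length contributions instead of "result not ready". */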
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);
static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);
static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};
bool r600_query_hw_init(struct r600_common_context *rctx,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rctx, query);
	if (!query->buffer.buf)
		return false;

	return true;
}
static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rctx, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}
static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}
static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		fence_va = va + ctx->max_db * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);

		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (fence_va)
		r600_gfx_write_fence(ctx, query->buffer.buf, fence_va, 0, 0x80000000);
}
static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(ctx, query_type);

	return r600_query_hw_create(rctx, query_type, index);
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}
static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}
void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx, query);
	} else {
		if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}
bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}
static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}
bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}
static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = rctx->max_db * 16;
		params->pair_stride = 16;
		params->pair_count = rctx->max_db;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}
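
/* Worked example of the layout these params describe: an occlusion query on
 * a chip with max_db = 8 stores 8 begin/end pairs of 16 bytes each
 * (pair_stride = 16, pair_count = 8), the begin and end counters sit at
 * offsets 0 and 8 inside a pair, and the fence written at stop time follows
 * the pairs at fence_offset = 8 * 16 = 128. */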
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}

	return 0;
}
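
/* Example: PIPE_QUERY_TIME_ELAPSED calls r600_query_read_result(buffer, 0, 2,
 * false), i.e. the begin timestamp occupies dwords 0-1 and the end timestamp
 * dwords 2-3. With test_status_bit set, the top bit of each 64-bit value is
 * the availability flag written by the GPU (compare the 0x80000000 fences
 * above), and pairs that are not ready contribute 0. */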
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < ctx->max_db; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		for (unsigned i = 0; i < ctx->max_db; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}
static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}
static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   boolean wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}
bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
	}
	return true;
}
/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
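
/* A worked example of the bit field (matching the code in
 * r600_query_hw_get_result_resource below): resolving an occlusion predicate
 * whose results span two buffers launches two grids. The first runs with
 * config = 8|2 (booleanize, write the summary for chaining); the second runs
 * with config = 8|1 (booleanize, read the previous summary) and stores to
 * the user-supplied buffer. */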
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0..1]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */

		"AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

					"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"

					"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
					"U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"

					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"

				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

				"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"

					/* Convert to boolean */
					"AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"

					"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"

						"AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"

						"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}
static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;
	struct r600_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct r600_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct r600_hw_query_params params;
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
	} consts;

	if (!rctx->query_result_shader) {
		r600_create_query_result_shader(rctx);
		if (!rctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}

	rctx->save_qbo_state(&rctx->b, &saved_state);

	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset;
		ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset;
			ssbo[2].buffer_size = 8;

			((struct r600_resource *)resource)->TC_L2_dirty = true;
		}

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}
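
/* The "* 5" above sizes the atom assuming each results block costs a 3-dword
 * SET_PREDICATION packet plus the relocation dwords emitted by
 * r600_emit_reloc in r600_emit_query_predication; keep the two in sync if
 * the emit path changes. */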
void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}
void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}
/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.num_render_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for(i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-ctx-flushes", NUM_CTX_FLUSHES, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use it as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X
#undef XG
#undef XFULL
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return ARRAY_SIZE(r600_driver_query_list) - 3;
	else
		return ARRAY_SIZE(r600_driver_query_list) - 4;
}
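
/* The -3/-4 adjustments above trim the tail of r600_driver_query_list:
 * kernels without the needed interfaces do not expose the temperature and
 * shader/memory clock queries, and the oldest ones also lack GPU-load,
 * which is why those queries must stay at the end of the list. */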
static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}
/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}
void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen *)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}
void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}