gallium/radeon: add new HUD queries for monitoring the CP
[mesa.git] / src / gallium / drivers / radeon / r600_query.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "r600_query.h"
26 #include "r600_cs.h"
27 #include "util/u_memory.h"
28 #include "util/u_upload_mgr.h"
29
30 #include "tgsi/tgsi_text.h"
31
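/* Describes where the begin/end values and the readiness fence live inside
 * one hardware query result slot, as filled in by r600_get_hw_query_params().
 * pair_stride/pair_count cover queries that store one begin/end pair per
 * render backend. */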
32 struct r600_hw_query_params {
33 unsigned start_offset;
34 unsigned end_offset;
35 unsigned fence_offset;
36 unsigned pair_stride;
37 unsigned pair_count;
38 };
39
40 /* Queries without buffer handling or suspend/resume. */
41 struct r600_query_sw {
42 struct r600_query b;
43
44 uint64_t begin_result;
45 uint64_t end_result;
46 /* Fence for GPU_FINISHED. */
47 struct pipe_fence_handle *fence;
48 };
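/* A software query samples a driver or winsys counter in begin() and end();
 * get_result() then reports end_result - begin_result, except for the
 * specially handled GPU_FINISHED, TIMESTAMP_DISJOINT, and GPIN queries. */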
49
50 static void r600_query_sw_destroy(struct r600_common_context *rctx,
51 struct r600_query *rquery)
52 {
53 struct pipe_screen *screen = rctx->b.screen;
54 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
55
56 screen->fence_reference(screen, &query->fence, NULL);
57 FREE(query);
58 }
59
60 static enum radeon_value_id winsys_id_from_type(unsigned type)
61 {
62 switch (type) {
63 case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
64 case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
65 case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
66 case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
67 case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
68 case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
69 case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
70 case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
71 case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
72 case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
73 case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
74 case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
75 case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
76 case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
77 case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
78 case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
79 default: unreachable("query type does not correspond to winsys id");
80 }
81 }
82
83 static bool r600_query_sw_begin(struct r600_common_context *rctx,
84 struct r600_query *rquery)
85 {
86 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
87
88 switch (query->b.type) {
89 case PIPE_QUERY_TIMESTAMP_DISJOINT:
90 case PIPE_QUERY_GPU_FINISHED:
91 break;
92 case R600_QUERY_DRAW_CALLS:
93 query->begin_result = rctx->num_draw_calls;
94 break;
95 case R600_QUERY_SPILL_DRAW_CALLS:
96 query->begin_result = rctx->num_spill_draw_calls;
97 break;
98 case R600_QUERY_COMPUTE_CALLS:
99 query->begin_result = rctx->num_compute_calls;
100 break;
101 case R600_QUERY_SPILL_COMPUTE_CALLS:
102 query->begin_result = rctx->num_spill_compute_calls;
103 break;
104 case R600_QUERY_DMA_CALLS:
105 query->begin_result = rctx->num_dma_calls;
106 break;
107 case R600_QUERY_CP_DMA_CALLS:
108 query->begin_result = rctx->num_cp_dma_calls;
109 break;
110 case R600_QUERY_NUM_VS_FLUSHES:
111 query->begin_result = rctx->num_vs_flushes;
112 break;
113 case R600_QUERY_NUM_PS_FLUSHES:
114 query->begin_result = rctx->num_ps_flushes;
115 break;
116 case R600_QUERY_NUM_CS_FLUSHES:
117 query->begin_result = rctx->num_cs_flushes;
118 break;
119 case R600_QUERY_NUM_FB_CACHE_FLUSHES:
120 query->begin_result = rctx->num_fb_cache_flushes;
121 break;
122 case R600_QUERY_NUM_L2_INVALIDATES:
123 query->begin_result = rctx->num_L2_invalidates;
124 break;
125 case R600_QUERY_NUM_L2_WRITEBACKS:
126 query->begin_result = rctx->num_L2_writebacks;
127 break;
128 case R600_QUERY_REQUESTED_VRAM:
129 case R600_QUERY_REQUESTED_GTT:
130 case R600_QUERY_MAPPED_VRAM:
131 case R600_QUERY_MAPPED_GTT:
132 case R600_QUERY_VRAM_USAGE:
133 case R600_QUERY_VRAM_VIS_USAGE:
134 case R600_QUERY_GTT_USAGE:
135 case R600_QUERY_GPU_TEMPERATURE:
136 case R600_QUERY_CURRENT_GPU_SCLK:
137 case R600_QUERY_CURRENT_GPU_MCLK:
138 case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
139 case R600_QUERY_NUM_MAPPED_BUFFERS:
140 query->begin_result = 0;
141 break;
142 case R600_QUERY_BUFFER_WAIT_TIME:
143 case R600_QUERY_NUM_GFX_IBS:
144 case R600_QUERY_NUM_SDMA_IBS:
145 case R600_QUERY_NUM_BYTES_MOVED:
146 case R600_QUERY_NUM_EVICTIONS: {
147 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
148 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
149 break;
150 }
151 case R600_QUERY_GPU_LOAD:
152 case R600_QUERY_GPU_SHADERS_BUSY:
153 case R600_QUERY_GPU_TA_BUSY:
154 case R600_QUERY_GPU_GDS_BUSY:
155 case R600_QUERY_GPU_VGT_BUSY:
156 case R600_QUERY_GPU_IA_BUSY:
157 case R600_QUERY_GPU_SX_BUSY:
158 case R600_QUERY_GPU_WD_BUSY:
159 case R600_QUERY_GPU_BCI_BUSY:
160 case R600_QUERY_GPU_SC_BUSY:
161 case R600_QUERY_GPU_PA_BUSY:
162 case R600_QUERY_GPU_DB_BUSY:
163 case R600_QUERY_GPU_CP_BUSY:
164 case R600_QUERY_GPU_CB_BUSY:
165 case R600_QUERY_GPU_SDMA_BUSY:
166 case R600_QUERY_GPU_PFP_BUSY:
167 case R600_QUERY_GPU_MEQ_BUSY:
168 case R600_QUERY_GPU_ME_BUSY:
169 case R600_QUERY_GPU_SURF_SYNC_BUSY:
170 case R600_QUERY_GPU_DMA_BUSY:
171 case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
172 case R600_QUERY_GPU_CE_BUSY:
173 query->begin_result = r600_begin_counter(rctx->screen,
174 query->b.type);
175 break;
176 case R600_QUERY_NUM_COMPILATIONS:
177 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
178 break;
179 case R600_QUERY_NUM_SHADERS_CREATED:
180 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
181 break;
182 case R600_QUERY_NUM_SHADER_CACHE_HITS:
183 query->begin_result =
184 p_atomic_read(&rctx->screen->num_shader_cache_hits);
185 break;
186 case R600_QUERY_GPIN_ASIC_ID:
187 case R600_QUERY_GPIN_NUM_SIMD:
188 case R600_QUERY_GPIN_NUM_RB:
189 case R600_QUERY_GPIN_NUM_SPI:
190 case R600_QUERY_GPIN_NUM_SE:
191 break;
192 default:
193 unreachable("r600_query_sw_begin: bad query type");
194 }
195
196 return true;
197 }
198
199 static bool r600_query_sw_end(struct r600_common_context *rctx,
200 struct r600_query *rquery)
201 {
202 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
203
204 switch (query->b.type) {
205 case PIPE_QUERY_TIMESTAMP_DISJOINT:
206 break;
207 case PIPE_QUERY_GPU_FINISHED:
208 rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
209 break;
210 case R600_QUERY_DRAW_CALLS:
211 query->end_result = rctx->num_draw_calls;
212 break;
213 case R600_QUERY_SPILL_DRAW_CALLS:
214 query->end_result = rctx->num_spill_draw_calls;
215 break;
216 case R600_QUERY_COMPUTE_CALLS:
217 query->end_result = rctx->num_compute_calls;
218 break;
219 case R600_QUERY_SPILL_COMPUTE_CALLS:
220 query->end_result = rctx->num_spill_compute_calls;
221 break;
222 case R600_QUERY_DMA_CALLS:
223 query->end_result = rctx->num_dma_calls;
224 break;
225 case R600_QUERY_CP_DMA_CALLS:
226 query->end_result = rctx->num_cp_dma_calls;
227 break;
228 case R600_QUERY_NUM_VS_FLUSHES:
229 query->end_result = rctx->num_vs_flushes;
230 break;
231 case R600_QUERY_NUM_PS_FLUSHES:
232 query->end_result = rctx->num_ps_flushes;
233 break;
234 case R600_QUERY_NUM_CS_FLUSHES:
235 query->end_result = rctx->num_cs_flushes;
236 break;
237 case R600_QUERY_NUM_FB_CACHE_FLUSHES:
238 query->end_result = rctx->num_fb_cache_flushes;
239 break;
240 case R600_QUERY_NUM_L2_INVALIDATES:
241 query->end_result = rctx->num_L2_invalidates;
242 break;
243 case R600_QUERY_NUM_L2_WRITEBACKS:
244 query->end_result = rctx->num_L2_writebacks;
245 break;
246 case R600_QUERY_REQUESTED_VRAM:
247 case R600_QUERY_REQUESTED_GTT:
248 case R600_QUERY_MAPPED_VRAM:
249 case R600_QUERY_MAPPED_GTT:
250 case R600_QUERY_VRAM_USAGE:
251 case R600_QUERY_VRAM_VIS_USAGE:
252 case R600_QUERY_GTT_USAGE:
253 case R600_QUERY_GPU_TEMPERATURE:
254 case R600_QUERY_CURRENT_GPU_SCLK:
255 case R600_QUERY_CURRENT_GPU_MCLK:
256 case R600_QUERY_BUFFER_WAIT_TIME:
257 case R600_QUERY_NUM_MAPPED_BUFFERS:
258 case R600_QUERY_NUM_GFX_IBS:
259 case R600_QUERY_NUM_SDMA_IBS:
260 case R600_QUERY_NUM_BYTES_MOVED:
261 case R600_QUERY_NUM_EVICTIONS: {
262 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
263 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
264 break;
265 }
266 case R600_QUERY_GPU_LOAD:
267 case R600_QUERY_GPU_SHADERS_BUSY:
268 case R600_QUERY_GPU_TA_BUSY:
269 case R600_QUERY_GPU_GDS_BUSY:
270 case R600_QUERY_GPU_VGT_BUSY:
271 case R600_QUERY_GPU_IA_BUSY:
272 case R600_QUERY_GPU_SX_BUSY:
273 case R600_QUERY_GPU_WD_BUSY:
274 case R600_QUERY_GPU_BCI_BUSY:
275 case R600_QUERY_GPU_SC_BUSY:
276 case R600_QUERY_GPU_PA_BUSY:
277 case R600_QUERY_GPU_DB_BUSY:
278 case R600_QUERY_GPU_CP_BUSY:
279 case R600_QUERY_GPU_CB_BUSY:
280 case R600_QUERY_GPU_SDMA_BUSY:
281 case R600_QUERY_GPU_PFP_BUSY:
282 case R600_QUERY_GPU_MEQ_BUSY:
283 case R600_QUERY_GPU_ME_BUSY:
284 case R600_QUERY_GPU_SURF_SYNC_BUSY:
285 case R600_QUERY_GPU_DMA_BUSY:
286 case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
287 case R600_QUERY_GPU_CE_BUSY:
288 query->end_result = r600_end_counter(rctx->screen,
289 query->b.type,
290 query->begin_result);
291 query->begin_result = 0;
292 break;
293 case R600_QUERY_NUM_COMPILATIONS:
294 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
295 break;
296 case R600_QUERY_NUM_SHADERS_CREATED:
297 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
298 break;
299 case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
300 query->end_result = rctx->last_tex_ps_draw_ratio;
301 break;
302 case R600_QUERY_NUM_SHADER_CACHE_HITS:
303 query->end_result =
304 p_atomic_read(&rctx->screen->num_shader_cache_hits);
305 break;
306 case R600_QUERY_GPIN_ASIC_ID:
307 case R600_QUERY_GPIN_NUM_SIMD:
308 case R600_QUERY_GPIN_NUM_RB:
309 case R600_QUERY_GPIN_NUM_SPI:
310 case R600_QUERY_GPIN_NUM_SE:
311 break;
312 default:
313 unreachable("r600_query_sw_end: bad query type");
314 }
315
316 return true;
317 }
318
319 static bool r600_query_sw_get_result(struct r600_common_context *rctx,
320 struct r600_query *rquery,
321 bool wait,
322 union pipe_query_result *result)
323 {
324 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
325
326 switch (query->b.type) {
327 case PIPE_QUERY_TIMESTAMP_DISJOINT:
328 /* Convert from cycles per millisecond to cycles per second (Hz). */
329 result->timestamp_disjoint.frequency =
330 (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
331 result->timestamp_disjoint.disjoint = false;
332 return true;
333 case PIPE_QUERY_GPU_FINISHED: {
334 struct pipe_screen *screen = rctx->b.screen;
335 result->b = screen->fence_finish(screen, &rctx->b, query->fence,
336 wait ? PIPE_TIMEOUT_INFINITE : 0);
337 return result->b;
338 }
339
340 case R600_QUERY_GPIN_ASIC_ID:
341 result->u32 = 0;
342 return true;
343 case R600_QUERY_GPIN_NUM_SIMD:
344 result->u32 = rctx->screen->info.num_good_compute_units;
345 return true;
346 case R600_QUERY_GPIN_NUM_RB:
347 result->u32 = rctx->screen->info.num_render_backends;
348 return true;
349 case R600_QUERY_GPIN_NUM_SPI:
350 result->u32 = 1; /* all supported chips have one SPI per SE */
351 return true;
352 case R600_QUERY_GPIN_NUM_SE:
353 result->u32 = rctx->screen->info.max_se;
354 return true;
355 }
356
357 result->u64 = query->end_result - query->begin_result;
358
359 switch (query->b.type) {
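/* Scale raw winsys values into the expected units: wait time ns -> us,
 * temperature millidegrees -> degrees C, clocks MHz -> Hz (units assumed
 * from the RADEON_* winsys query names, e.g. RADEON_BUFFER_WAIT_TIME_NS). */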
360 case R600_QUERY_BUFFER_WAIT_TIME:
361 case R600_QUERY_GPU_TEMPERATURE:
362 result->u64 /= 1000;
363 break;
364 case R600_QUERY_CURRENT_GPU_SCLK:
365 case R600_QUERY_CURRENT_GPU_MCLK:
366 result->u64 *= 1000000;
367 break;
368 }
369
370 return true;
371 }
372
373
374 static struct r600_query_ops sw_query_ops = {
375 .destroy = r600_query_sw_destroy,
376 .begin = r600_query_sw_begin,
377 .end = r600_query_sw_end,
378 .get_result = r600_query_sw_get_result,
379 .get_result_resource = NULL
380 };
381
382 static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
383 unsigned query_type)
384 {
385 struct r600_query_sw *query;
386
387 query = CALLOC_STRUCT(r600_query_sw);
388 if (!query)
389 return NULL;
390
391 query->b.type = query_type;
392 query->b.ops = &sw_query_ops;
393
394 return (struct pipe_query *)query;
395 }
396
397 void r600_query_hw_destroy(struct r600_common_context *rctx,
398 struct r600_query *rquery)
399 {
400 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
401 struct r600_query_buffer *prev = query->buffer.previous;
402
403 /* Release all query buffers. */
404 while (prev) {
405 struct r600_query_buffer *qbuf = prev;
406 prev = prev->previous;
407 r600_resource_reference(&qbuf->buf, NULL);
408 FREE(qbuf);
409 }
410
411 r600_resource_reference(&query->buffer.buf, NULL);
412 FREE(rquery);
413 }
414
415 static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
416 struct r600_query_hw *query)
417 {
418 unsigned buf_size = MAX2(query->result_size,
419 ctx->screen->info.min_alloc_size);
420
421 /* Queries are normally read by the CPU after
422 * being written by the GPU, hence staging is a good
423 * usage pattern.
424 */
425 struct r600_resource *buf = (struct r600_resource*)
426 pipe_buffer_create(ctx->b.screen, 0,
427 PIPE_USAGE_STAGING, buf_size);
428 if (!buf)
429 return NULL;
430
431 if (!query->ops->prepare_buffer(ctx, query, buf)) {
432 r600_resource_reference(&buf, NULL);
433 return NULL;
434 }
435
436 return buf;
437 }
438
439 static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
440 struct r600_query_hw *query,
441 struct r600_resource *buffer)
442 {
443 /* Callers ensure that the buffer is currently unused by the GPU. */
444 uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
445 PIPE_TRANSFER_WRITE |
446 PIPE_TRANSFER_UNSYNCHRONIZED);
447 if (!results)
448 return false;
449
450 memset(results, 0, buffer->b.b.width0);
451
452 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
453 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
454 unsigned max_rbs = ctx->screen->info.num_render_backends;
455 unsigned enabled_rb_mask = ctx->screen->info.enabled_rb_mask;
456 unsigned num_results;
457 unsigned i, j;
458
459 /* Set top bits for unused backends. */
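/* Each RB slot holds 64-bit begin/end values whose top bit means "written";
 * pre-setting it for disabled RBs keeps the status check in
 * r600_query_read_result() from treating them as forever-pending. */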
460 num_results = buffer->b.b.width0 / query->result_size;
461 for (j = 0; j < num_results; j++) {
462 for (i = 0; i < max_rbs; i++) {
463 if (!(enabled_rb_mask & (1<<i))) {
464 results[(i * 4)+1] = 0x80000000;
465 results[(i * 4)+3] = 0x80000000;
466 }
467 }
468 results += 4 * max_rbs;
469 }
470 }
471
472 return true;
473 }
474
475 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
476 struct r600_query *rquery,
477 bool wait,
478 enum pipe_query_value_type result_type,
479 int index,
480 struct pipe_resource *resource,
481 unsigned offset);
482
483 static struct r600_query_ops query_hw_ops = {
484 .destroy = r600_query_hw_destroy,
485 .begin = r600_query_hw_begin,
486 .end = r600_query_hw_end,
487 .get_result = r600_query_hw_get_result,
488 .get_result_resource = r600_query_hw_get_result_resource,
489 };
490
491 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
492 struct r600_query_hw *query,
493 struct r600_resource *buffer,
494 uint64_t va);
495 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
496 struct r600_query_hw *query,
497 struct r600_resource *buffer,
498 uint64_t va);
499 static void r600_query_hw_add_result(struct r600_common_context *ctx,
500 struct r600_query_hw *, void *buffer,
501 union pipe_query_result *result);
502 static void r600_query_hw_clear_result(struct r600_query_hw *,
503 union pipe_query_result *);
504
505 static struct r600_query_hw_ops query_hw_default_hw_ops = {
506 .prepare_buffer = r600_query_hw_prepare_buffer,
507 .emit_start = r600_query_hw_do_emit_start,
508 .emit_stop = r600_query_hw_do_emit_stop,
509 .clear_result = r600_query_hw_clear_result,
510 .add_result = r600_query_hw_add_result,
511 };
512
513 bool r600_query_hw_init(struct r600_common_context *rctx,
514 struct r600_query_hw *query)
515 {
516 query->buffer.buf = r600_new_query_buffer(rctx, query);
517 if (!query->buffer.buf)
518 return false;
519
520 return true;
521 }
522
523 static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
524 unsigned query_type,
525 unsigned index)
526 {
527 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
528 if (!query)
529 return NULL;
530
531 query->b.type = query_type;
532 query->b.ops = &query_hw_ops;
533 query->ops = &query_hw_default_hw_ops;
534
535 switch (query_type) {
536 case PIPE_QUERY_OCCLUSION_COUNTER:
537 case PIPE_QUERY_OCCLUSION_PREDICATE:
538 query->result_size = 16 * rctx->screen->info.num_render_backends;
539 query->result_size += 16; /* for the fence + alignment */
540 query->num_cs_dw_begin = 6;
541 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
542 break;
543 case PIPE_QUERY_TIME_ELAPSED:
544 query->result_size = 24;
545 query->num_cs_dw_begin = 8;
546 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
547 break;
548 case PIPE_QUERY_TIMESTAMP:
549 query->result_size = 16;
550 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
551 query->flags = R600_QUERY_HW_FLAG_NO_START;
552 break;
553 case PIPE_QUERY_PRIMITIVES_EMITTED:
554 case PIPE_QUERY_PRIMITIVES_GENERATED:
555 case PIPE_QUERY_SO_STATISTICS:
556 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
557 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
558 query->result_size = 32;
559 query->num_cs_dw_begin = 6;
560 query->num_cs_dw_end = 6;
561 query->stream = index;
562 break;
563 case PIPE_QUERY_PIPELINE_STATISTICS:
564 /* 11 values on EG, 8 on R600. */
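/* Each value is stored as a 64-bit counter in both the begin and the end
 * sample, i.e. 16 bytes per value; see r600_get_hw_query_params(). */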
565 query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
566 query->result_size += 8; /* for the fence + alignment */
567 query->num_cs_dw_begin = 6;
568 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
569 break;
570 default:
571 assert(0);
572 FREE(query);
573 return NULL;
574 }
575
576 if (!r600_query_hw_init(rctx, query)) {
577 FREE(query);
578 return NULL;
579 }
580
581 return (struct pipe_query *)query;
582 }
583
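/* Keep a count of active occlusion queries so ZPASS counting can be
 * toggled; OCCLUSION_COUNTER additionally needs precise ("perfect") counts,
 * while OCCLUSION_PREDICATE can tolerate approximate ones (assumption about
 * what set_occlusion_query_state configures per chip). */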
584 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
585 unsigned type, int diff)
586 {
587 if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
588 type == PIPE_QUERY_OCCLUSION_PREDICATE) {
589 bool old_enable = rctx->num_occlusion_queries != 0;
590 bool old_perfect_enable =
591 rctx->num_perfect_occlusion_queries != 0;
592 bool enable, perfect_enable;
593
594 rctx->num_occlusion_queries += diff;
595 assert(rctx->num_occlusion_queries >= 0);
596
597 if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
598 rctx->num_perfect_occlusion_queries += diff;
599 assert(rctx->num_perfect_occlusion_queries >= 0);
600 }
601
602 enable = rctx->num_occlusion_queries != 0;
603 perfect_enable = rctx->num_perfect_occlusion_queries != 0;
604
605 if (enable != old_enable || perfect_enable != old_perfect_enable) {
606 rctx->set_occlusion_query_state(&rctx->b, enable);
607 }
608 }
609 }
610
611 static unsigned event_type_for_stream(struct r600_query_hw *query)
612 {
613 switch (query->stream) {
614 default:
615 case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
616 case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
617 case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
618 case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
619 }
620 }
621
622 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
623 struct r600_query_hw *query,
624 struct r600_resource *buffer,
625 uint64_t va)
626 {
627 struct radeon_winsys_cs *cs = ctx->gfx.cs;
628
629 switch (query->b.type) {
630 case PIPE_QUERY_OCCLUSION_COUNTER:
631 case PIPE_QUERY_OCCLUSION_PREDICATE:
632 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
633 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
634 radeon_emit(cs, va);
635 radeon_emit(cs, (va >> 32) & 0xFFFF);
636 break;
637 case PIPE_QUERY_PRIMITIVES_EMITTED:
638 case PIPE_QUERY_PRIMITIVES_GENERATED:
639 case PIPE_QUERY_SO_STATISTICS:
640 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
641 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
642 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
643 radeon_emit(cs, va);
644 radeon_emit(cs, (va >> 32) & 0xFFFF);
645 break;
646 case PIPE_QUERY_TIME_ELAPSED:
647 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
648 0, 3, NULL, va, 0, 0);
649 break;
650 case PIPE_QUERY_PIPELINE_STATISTICS:
651 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
652 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
653 radeon_emit(cs, va);
654 radeon_emit(cs, (va >> 32) & 0xFFFF);
655 break;
656 default:
657 assert(0);
658 }
659 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
660 RADEON_PRIO_QUERY);
661 }
662
663 static void r600_query_hw_emit_start(struct r600_common_context *ctx,
664 struct r600_query_hw *query)
665 {
666 uint64_t va;
667
668 if (!query->buffer.buf)
669 return; /* previous buffer allocation failure */
670
671 r600_update_occlusion_query_state(ctx, query->b.type, 1);
672 r600_update_prims_generated_query_state(ctx, query->b.type, 1);
673
674 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
675 true);
676
677 /* Get a new query buffer if needed. */
678 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
679 struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
680 *qbuf = query->buffer;
681 query->buffer.results_end = 0;
682 query->buffer.previous = qbuf;
683 query->buffer.buf = r600_new_query_buffer(ctx, query);
684 if (!query->buffer.buf)
685 return;
686 }
687
688 /* emit begin query */
689 va = query->buffer.buf->gpu_address + query->buffer.results_end;
690
691 query->ops->emit_start(ctx, query, query->buffer.buf, va);
692
693 ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
694 }
695
696 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
697 struct r600_query_hw *query,
698 struct r600_resource *buffer,
699 uint64_t va)
700 {
701 struct radeon_winsys_cs *cs = ctx->gfx.cs;
702 uint64_t fence_va = 0;
703
704 switch (query->b.type) {
705 case PIPE_QUERY_OCCLUSION_COUNTER:
706 case PIPE_QUERY_OCCLUSION_PREDICATE:
707 va += 8;
708 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
709 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
710 radeon_emit(cs, va);
711 radeon_emit(cs, (va >> 32) & 0xFFFF);
712
713 fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
714 break;
715 case PIPE_QUERY_PRIMITIVES_EMITTED:
716 case PIPE_QUERY_PRIMITIVES_GENERATED:
717 case PIPE_QUERY_SO_STATISTICS:
718 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
719 va += query->result_size/2;
720 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
721 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
722 radeon_emit(cs, va);
723 radeon_emit(cs, (va >> 32) & 0xFFFF);
724 break;
725 case PIPE_QUERY_TIME_ELAPSED:
726 va += 8;
727 /* fall through */
728 case PIPE_QUERY_TIMESTAMP:
729 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
730 0, 3, NULL, va, 0, 0);
731 fence_va = va + 8;
732 break;
733 case PIPE_QUERY_PIPELINE_STATISTICS: {
734 unsigned sample_size = (query->result_size - 8) / 2;
735
736 va += sample_size;
737 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
738 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
739 radeon_emit(cs, va);
740 radeon_emit(cs, (va >> 32) & 0xFFFF);
741
742 fence_va = va + sample_size;
743 break;
744 }
745 default:
746 assert(0);
747 }
748 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
749 RADEON_PRIO_QUERY);
750
751 if (fence_va)
752 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
753 query->buffer.buf, fence_va, 0, 0x80000000);
754 }
755
756 static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
757 struct r600_query_hw *query)
758 {
759 uint64_t va;
760
761 if (!query->buffer.buf)
762 return; /* previous buffer allocation failure */
763
764 /* Queries with a begin already reserved CS space in begin_query; only NO_START queries reserve it here. */
765 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
766 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
767 }
768
769 /* emit end query */
770 va = query->buffer.buf->gpu_address + query->buffer.results_end;
771
772 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
773
774 query->buffer.results_end += query->result_size;
775
776 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
777 ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
778
779 r600_update_occlusion_query_state(ctx, query->b.type, -1);
780 r600_update_prims_generated_query_state(ctx, query->b.type, -1);
781 }
782
783 static void r600_emit_query_predication(struct r600_common_context *ctx,
784 struct r600_atom *atom)
785 {
786 struct radeon_winsys_cs *cs = ctx->gfx.cs;
787 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
788 struct r600_query_buffer *qbuf;
789 uint32_t op;
790 bool flag_wait;
791
792 if (!query)
793 return;
794
795 flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
796 ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
797
798 switch (query->b.type) {
799 case PIPE_QUERY_OCCLUSION_COUNTER:
800 case PIPE_QUERY_OCCLUSION_PREDICATE:
801 op = PRED_OP(PREDICATION_OP_ZPASS);
802 break;
803 case PIPE_QUERY_PRIMITIVES_EMITTED:
804 case PIPE_QUERY_PRIMITIVES_GENERATED:
805 case PIPE_QUERY_SO_STATISTICS:
806 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
807 op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
808 break;
809 default:
810 assert(0);
811 return;
812 }
813
814 /* If true, invert the predicate; see GL_ARB_conditional_render_inverted. */
815 if (ctx->render_cond_invert)
816 op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
817 else
818 op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */
819
820 op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
821
822 /* emit predicate packets for all data blocks */
823 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
824 unsigned results_base = 0;
825 uint64_t va = qbuf->buf->gpu_address;
826
827 while (results_base < qbuf->results_end) {
828 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
829 radeon_emit(cs, va + results_base);
830 radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
831 r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
832 RADEON_PRIO_QUERY);
833 results_base += query->result_size;
834
835 /* set CONTINUE bit for all packets except the first */
836 op |= PREDICATION_CONTINUE;
837 }
838 }
839 }
840
841 static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
842 {
843 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
844
845 if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
846 query_type == PIPE_QUERY_GPU_FINISHED ||
847 query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
848 return r600_query_sw_create(ctx, query_type);
849
850 return r600_query_hw_create(rctx, query_type, index);
851 }
852
853 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
854 {
855 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
856 struct r600_query *rquery = (struct r600_query *)query;
857
858 rquery->ops->destroy(rctx, rquery);
859 }
860
861 static boolean r600_begin_query(struct pipe_context *ctx,
862 struct pipe_query *query)
863 {
864 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
865 struct r600_query *rquery = (struct r600_query *)query;
866
867 return rquery->ops->begin(rctx, rquery);
868 }
869
870 void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
871 struct r600_query_hw *query)
872 {
873 struct r600_query_buffer *prev = query->buffer.previous;
874
875 /* Discard the old query buffers. */
876 while (prev) {
877 struct r600_query_buffer *qbuf = prev;
878 prev = prev->previous;
879 r600_resource_reference(&qbuf->buf, NULL);
880 FREE(qbuf);
881 }
882
883 query->buffer.results_end = 0;
884 query->buffer.previous = NULL;
885
886 /* Obtain a new buffer if the current one can't be mapped without a stall. */
887 if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
888 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
889 r600_resource_reference(&query->buffer.buf, NULL);
890 query->buffer.buf = r600_new_query_buffer(rctx, query);
891 } else {
892 if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
893 r600_resource_reference(&query->buffer.buf, NULL);
894 }
895 }
896
897 bool r600_query_hw_begin(struct r600_common_context *rctx,
898 struct r600_query *rquery)
899 {
900 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
901
902 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
903 assert(0);
904 return false;
905 }
906
907 if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
908 r600_query_hw_reset_buffers(rctx, query);
909
910 r600_query_hw_emit_start(rctx, query);
911 if (!query->buffer.buf)
912 return false;
913
914 LIST_ADDTAIL(&query->list, &rctx->active_queries);
915 return true;
916 }
917
918 static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
919 {
920 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
921 struct r600_query *rquery = (struct r600_query *)query;
922
923 return rquery->ops->end(rctx, rquery);
924 }
925
926 bool r600_query_hw_end(struct r600_common_context *rctx,
927 struct r600_query *rquery)
928 {
929 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
930
931 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
932 r600_query_hw_reset_buffers(rctx, query);
933
934 r600_query_hw_emit_stop(rctx, query);
935
936 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
937 LIST_DELINIT(&query->list);
938
939 if (!query->buffer.buf)
940 return false;
941
942 return true;
943 }
944
945 static void r600_get_hw_query_params(struct r600_common_context *rctx,
946 struct r600_query_hw *rquery, int index,
947 struct r600_hw_query_params *params)
948 {
949 unsigned max_rbs = rctx->screen->info.num_render_backends;
950
951 params->pair_stride = 0;
952 params->pair_count = 1;
953
954 switch (rquery->b.type) {
955 case PIPE_QUERY_OCCLUSION_COUNTER:
956 case PIPE_QUERY_OCCLUSION_PREDICATE:
957 params->start_offset = 0;
958 params->end_offset = 8;
959 params->fence_offset = max_rbs * 16;
960 params->pair_stride = 16;
961 params->pair_count = max_rbs;
962 break;
963 case PIPE_QUERY_TIME_ELAPSED:
964 params->start_offset = 0;
965 params->end_offset = 8;
966 params->fence_offset = 16;
967 break;
968 case PIPE_QUERY_TIMESTAMP:
969 params->start_offset = 0;
970 params->end_offset = 0;
971 params->fence_offset = 8;
972 break;
973 case PIPE_QUERY_PRIMITIVES_EMITTED:
974 params->start_offset = 8;
975 params->end_offset = 24;
976 params->fence_offset = params->end_offset + 4;
977 break;
978 case PIPE_QUERY_PRIMITIVES_GENERATED:
979 params->start_offset = 0;
980 params->end_offset = 16;
981 params->fence_offset = params->end_offset + 4;
982 break;
983 case PIPE_QUERY_SO_STATISTICS:
984 params->start_offset = 8 - index * 8;
985 params->end_offset = 24 - index * 8;
986 params->fence_offset = params->end_offset + 4;
987 break;
988 case PIPE_QUERY_PIPELINE_STATISTICS:
989 {
990 /* Offsets apply to EG+ */
991 static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
992 params->start_offset = offsets[index];
993 params->end_offset = 88 + offsets[index];
994 params->fence_offset = 2 * 88;
995 break;
996 }
997 default:
998 unreachable("r600_get_hw_query_params unsupported");
999 }
1000 }
1001
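/* Read one 64-bit begin/end pair (given as 32-bit dword indices into the
 * mapped buffer) and return the difference. If test_status_bit is set,
 * return 0 unless bit 63 of both values signals that the CP has written
 * them, e.g. r600_query_read_result(buf, 0, 2, true) for the first RB of
 * an occlusion query. */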
1002 static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
1003 bool test_status_bit)
1004 {
1005 uint32_t *current_result = (uint32_t*)map;
1006 uint64_t start, end;
1007
1008 start = (uint64_t)current_result[start_index] |
1009 (uint64_t)current_result[start_index+1] << 32;
1010 end = (uint64_t)current_result[end_index] |
1011 (uint64_t)current_result[end_index+1] << 32;
1012
1013 if (!test_status_bit ||
1014 ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
1015 return end - start;
1016 }
1017 return 0;
1018 }
1019
1020 static void r600_query_hw_add_result(struct r600_common_context *ctx,
1021 struct r600_query_hw *query,
1022 void *buffer,
1023 union pipe_query_result *result)
1024 {
1025 unsigned max_rbs = ctx->screen->info.num_render_backends;
1026
1027 switch (query->b.type) {
1028 case PIPE_QUERY_OCCLUSION_COUNTER: {
1029 for (unsigned i = 0; i < max_rbs; ++i) {
1030 unsigned results_base = i * 16;
1031 result->u64 +=
1032 r600_query_read_result(buffer + results_base, 0, 2, true);
1033 }
1034 break;
1035 }
1036 case PIPE_QUERY_OCCLUSION_PREDICATE: {
1037 for (unsigned i = 0; i < max_rbs; ++i) {
1038 unsigned results_base = i * 16;
1039 result->b = result->b ||
1040 r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
1041 }
1042 break;
1043 }
1044 case PIPE_QUERY_TIME_ELAPSED:
1045 result->u64 += r600_query_read_result(buffer, 0, 2, false);
1046 break;
1047 case PIPE_QUERY_TIMESTAMP:
1048 result->u64 = *(uint64_t*)buffer;
1049 break;
1050 case PIPE_QUERY_PRIMITIVES_EMITTED:
1051 /* SAMPLE_STREAMOUTSTATS stores this structure:
1052 * {
1053 * u64 NumPrimitivesWritten;
1054 * u64 PrimitiveStorageNeeded;
1055 * }
1056 * We only need NumPrimitivesWritten here. */
1057 result->u64 += r600_query_read_result(buffer, 2, 6, true);
1058 break;
1059 case PIPE_QUERY_PRIMITIVES_GENERATED:
1060 /* Here we read PrimitiveStorageNeeded. */
1061 result->u64 += r600_query_read_result(buffer, 0, 4, true);
1062 break;
1063 case PIPE_QUERY_SO_STATISTICS:
1064 result->so_statistics.num_primitives_written +=
1065 r600_query_read_result(buffer, 2, 6, true);
1066 result->so_statistics.primitives_storage_needed +=
1067 r600_query_read_result(buffer, 0, 4, true);
1068 break;
1069 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1070 result->b = result->b ||
1071 r600_query_read_result(buffer, 2, 6, true) !=
1072 r600_query_read_result(buffer, 0, 4, true);
1073 break;
1074 case PIPE_QUERY_PIPELINE_STATISTICS:
1075 if (ctx->chip_class >= EVERGREEN) {
1076 result->pipeline_statistics.ps_invocations +=
1077 r600_query_read_result(buffer, 0, 22, false);
1078 result->pipeline_statistics.c_primitives +=
1079 r600_query_read_result(buffer, 2, 24, false);
1080 result->pipeline_statistics.c_invocations +=
1081 r600_query_read_result(buffer, 4, 26, false);
1082 result->pipeline_statistics.vs_invocations +=
1083 r600_query_read_result(buffer, 6, 28, false);
1084 result->pipeline_statistics.gs_invocations +=
1085 r600_query_read_result(buffer, 8, 30, false);
1086 result->pipeline_statistics.gs_primitives +=
1087 r600_query_read_result(buffer, 10, 32, false);
1088 result->pipeline_statistics.ia_primitives +=
1089 r600_query_read_result(buffer, 12, 34, false);
1090 result->pipeline_statistics.ia_vertices +=
1091 r600_query_read_result(buffer, 14, 36, false);
1092 result->pipeline_statistics.hs_invocations +=
1093 r600_query_read_result(buffer, 16, 38, false);
1094 result->pipeline_statistics.ds_invocations +=
1095 r600_query_read_result(buffer, 18, 40, false);
1096 result->pipeline_statistics.cs_invocations +=
1097 r600_query_read_result(buffer, 20, 42, false);
1098 } else {
1099 result->pipeline_statistics.ps_invocations +=
1100 r600_query_read_result(buffer, 0, 16, false);
1101 result->pipeline_statistics.c_primitives +=
1102 r600_query_read_result(buffer, 2, 18, false);
1103 result->pipeline_statistics.c_invocations +=
1104 r600_query_read_result(buffer, 4, 20, false);
1105 result->pipeline_statistics.vs_invocations +=
1106 r600_query_read_result(buffer, 6, 22, false);
1107 result->pipeline_statistics.gs_invocations +=
1108 r600_query_read_result(buffer, 8, 24, false);
1109 result->pipeline_statistics.gs_primitives +=
1110 r600_query_read_result(buffer, 10, 26, false);
1111 result->pipeline_statistics.ia_primitives +=
1112 r600_query_read_result(buffer, 12, 28, false);
1113 result->pipeline_statistics.ia_vertices +=
1114 r600_query_read_result(buffer, 14, 30, false);
1115 }
1116 #if 0 /* for testing */
1117 printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
1118 "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
1119 "Clipper prims=%llu, PS=%llu, CS=%llu\n",
1120 result->pipeline_statistics.ia_vertices,
1121 result->pipeline_statistics.ia_primitives,
1122 result->pipeline_statistics.vs_invocations,
1123 result->pipeline_statistics.hs_invocations,
1124 result->pipeline_statistics.ds_invocations,
1125 result->pipeline_statistics.gs_invocations,
1126 result->pipeline_statistics.gs_primitives,
1127 result->pipeline_statistics.c_invocations,
1128 result->pipeline_statistics.c_primitives,
1129 result->pipeline_statistics.ps_invocations,
1130 result->pipeline_statistics.cs_invocations);
1131 #endif
1132 break;
1133 default:
1134 assert(0);
1135 }
1136 }
1137
1138 static boolean r600_get_query_result(struct pipe_context *ctx,
1139 struct pipe_query *query, boolean wait,
1140 union pipe_query_result *result)
1141 {
1142 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1143 struct r600_query *rquery = (struct r600_query *)query;
1144
1145 return rquery->ops->get_result(rctx, rquery, wait, result);
1146 }
1147
1148 static void r600_get_query_result_resource(struct pipe_context *ctx,
1149 struct pipe_query *query,
1150 boolean wait,
1151 enum pipe_query_value_type result_type,
1152 int index,
1153 struct pipe_resource *resource,
1154 unsigned offset)
1155 {
1156 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1157 struct r600_query *rquery = (struct r600_query *)query;
1158
1159 rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
1160 resource, offset);
1161 }
1162
1163 static void r600_query_hw_clear_result(struct r600_query_hw *query,
1164 union pipe_query_result *result)
1165 {
1166 util_query_clear_result(result, query->b.type);
1167 }
1168
1169 bool r600_query_hw_get_result(struct r600_common_context *rctx,
1170 struct r600_query *rquery,
1171 bool wait, union pipe_query_result *result)
1172 {
1173 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1174 struct r600_query_buffer *qbuf;
1175
1176 query->ops->clear_result(query, result);
1177
1178 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1179 unsigned results_base = 0;
1180 void *map;
1181
1182 map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
1183 PIPE_TRANSFER_READ |
1184 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
1185 if (!map)
1186 return false;
1187
1188 while (results_base != qbuf->results_end) {
1189 query->ops->add_result(rctx, query, map + results_base,
1190 result);
1191 results_base += query->result_size;
1192 }
1193 }
1194
1195 /* Convert the time to expected units. */
1196 if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
1197 rquery->type == PIPE_QUERY_TIMESTAMP) {
1198 result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
1199 }
1200 return true;
1201 }
1202
1203 /* Create the compute shader that is used to collect the results.
1204 *
1205 * One compute grid with a single thread is launched for every query result
1206 * buffer. The thread (optionally) reads a previous summary buffer, then
1207 * accumulates data from the query result buffer, and writes the result either
1208 * to a summary buffer to be consumed by the next grid invocation or to the
1209 * user-supplied buffer.
1210 *
1211 * Data layout:
1212 *
1213 * CONST
1214 * 0.x = end_offset
1215 * 0.y = result_stride
1216 * 0.z = result_count
1217 * 0.w = bit field:
1218 * 1: read previously accumulated values
1219 * 2: write accumulated values for chaining
1220 * 4: write result available
1221 * 8: convert result to boolean (0/1)
1222 * 16: only read one dword and use that as result
1223 * 32: apply timestamp conversion
1224 * 64: store full 64 bits result
1225 * 128: store signed 32 bits result
1226 * 1.x = fence_offset
1227 * 1.y = pair_stride
1228 * 1.z = pair_count
1229 *
1230 * BUFFER[0] = query result buffer
1231 * BUFFER[1] = previous summary buffer
1232 * BUFFER[2] = next summary buffer or user-supplied buffer
1233 */
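/* A worked example (assuming a chip with 4 RBs): an occlusion query would
 * get CONST 0.x = 8 (end_offset), 1.x = 64 (fence_offset), 1.y = 16
 * (pair_stride) and 1.z = 4 (pair_count), per r600_get_hw_query_params(),
 * with 0.y = query->result_size. */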
1234 static void r600_create_query_result_shader(struct r600_common_context *rctx)
1235 {
1236 /* TEMP[0].xy = accumulated result so far
1237 * TEMP[0].z = result not available
1238 *
1239 * TEMP[1].x = current result index
1240 * TEMP[1].y = current pair index
1241 */
1242 static const char text_tmpl[] =
1243 "COMP\n"
1244 "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
1245 "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
1246 "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
1247 "DCL BUFFER[0]\n"
1248 "DCL BUFFER[1]\n"
1249 "DCL BUFFER[2]\n"
1250 "DCL CONST[0..1]\n"
1251 "DCL TEMP[0..5]\n"
1252 "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
1253 "IMM[1] UINT32 {1, 2, 4, 8}\n"
1254 "IMM[2] UINT32 {16, 32, 64, 128}\n"
1255 "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
1256
1257 "AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
1258 "UIF TEMP[5]\n"
1259 /* Check result availability. */
1260 "LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
1261 "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
1262 "MOV TEMP[1], TEMP[0].zzzz\n"
1263 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1264
1265 /* Load result if available. */
1266 "UIF TEMP[1]\n"
1267 "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
1268 "ENDIF\n"
1269 "ELSE\n"
1270 /* Load previously accumulated result if requested. */
1271 "MOV TEMP[0], IMM[0].xxxx\n"
1272 "AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
1273 "UIF TEMP[4]\n"
1274 "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
1275 "ENDIF\n"
1276
1277 "MOV TEMP[1].x, IMM[0].xxxx\n"
1278 "BGNLOOP\n"
1279 /* Break if accumulated result so far is not available. */
1280 "UIF TEMP[0].zzzz\n"
1281 "BRK\n"
1282 "ENDIF\n"
1283
1284 /* Break if result_index >= result_count. */
1285 "USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
1286 "UIF TEMP[5]\n"
1287 "BRK\n"
1288 "ENDIF\n"
1289
1290 /* Load fence and check result availability */
1291 "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
1292 "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
1293 "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
1294 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1295 "UIF TEMP[0].zzzz\n"
1296 "BRK\n"
1297 "ENDIF\n"
1298
1299 "MOV TEMP[1].y, IMM[0].xxxx\n"
1300 "BGNLOOP\n"
1301 /* Load start and end. */
1302 "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
1303 "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
1304 "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
1305
1306 "UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
1307 "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"
1308
1309 "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
1310 "U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"
1311
1312 /* Increment pair index */
1313 "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
1314 "USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
1315 "UIF TEMP[5]\n"
1316 "BRK\n"
1317 "ENDIF\n"
1318 "ENDLOOP\n"
1319
1320 /* Increment result index */
1321 "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
1322 "ENDLOOP\n"
1323 "ENDIF\n"
1324
1325 "AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
1326 "UIF TEMP[4]\n"
1327 /* Store accumulated data for chaining. */
1328 "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
1329 "ELSE\n"
1330 "AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
1331 "UIF TEMP[4]\n"
1332 /* Store result availability. */
1333 "NOT TEMP[0].z, TEMP[0]\n"
1334 "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
1335 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"
1336
1337 "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
1338 "UIF TEMP[4]\n"
1339 "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
1340 "ENDIF\n"
1341 "ELSE\n"
1342 /* Store result if it is available. */
1343 "NOT TEMP[4], TEMP[0].zzzz\n"
1344 "UIF TEMP[4]\n"
1345 /* Apply timestamp conversion */
1346 "AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
1347 "UIF TEMP[4]\n"
1348 "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
1349 "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
1350 "ENDIF\n"
1351
1352 /* Convert to boolean */
1353 "AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
1354 "UIF TEMP[4]\n"
1355 "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
1356 "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
1357 "MOV TEMP[0].y, IMM[0].xxxx\n"
1358 "ENDIF\n"
1359
1360 "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
1361 "UIF TEMP[4]\n"
1362 "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
1363 "ELSE\n"
1364 /* Clamping */
1365 "UIF TEMP[0].yyyy\n"
1366 "MOV TEMP[0].x, IMM[0].wwww\n"
1367 "ENDIF\n"
1368
1369 "AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
1370 "UIF TEMP[4]\n"
1371 "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
1372 "ENDIF\n"
1373
1374 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
1375 "ENDIF\n"
1376 "ENDIF\n"
1377 "ENDIF\n"
1378 "ENDIF\n"
1379
1380 "END\n";
1381
1382 char text[sizeof(text_tmpl) + 32];
1383 struct tgsi_token tokens[1024];
1384 struct pipe_compute_state state = {};
1385
1386 /* Hard code the frequency into the shader so that the backend can
1387 * use the full range of optimizations for divide-by-constant.
1388 */
1389 snprintf(text, sizeof(text), text_tmpl,
1390 rctx->screen->info.clock_crystal_freq);
1391
1392 if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
1393 assert(false);
1394 return;
1395 }
1396
1397 state.ir_type = PIPE_SHADER_IR_TGSI;
1398 state.prog = tokens;
1399
1400 rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
1401 }
1402
1403 static void r600_restore_qbo_state(struct r600_common_context *rctx,
1404 struct r600_qbo_state *st)
1405 {
1406 rctx->b.bind_compute_state(&rctx->b, st->saved_compute);
1407
1408 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
1409 pipe_resource_reference(&st->saved_const0.buffer, NULL);
1410
1411 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
1412 for (unsigned i = 0; i < 3; ++i)
1413 pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
1414 }
1415
1416 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
1417 struct r600_query *rquery,
1418 bool wait,
1419 enum pipe_query_value_type result_type,
1420 int index,
1421 struct pipe_resource *resource,
1422 unsigned offset)
1423 {
1424 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1425 struct r600_query_buffer *qbuf;
1426 struct r600_query_buffer *qbuf_prev;
1427 struct pipe_resource *tmp_buffer = NULL;
1428 unsigned tmp_buffer_offset = 0;
1429 struct r600_qbo_state saved_state = {};
1430 struct pipe_grid_info grid = {};
1431 struct pipe_constant_buffer constant_buffer = {};
1432 struct pipe_shader_buffer ssbo[3];
1433 struct r600_hw_query_params params;
1434 struct {
1435 uint32_t end_offset;
1436 uint32_t result_stride;
1437 uint32_t result_count;
1438 uint32_t config;
1439 uint32_t fence_offset;
1440 uint32_t pair_stride;
1441 uint32_t pair_count;
1442 } consts;
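/* This struct must mirror the CONST[0..1] layout documented above
 * r600_create_query_result_shader(): 0.xyzw = end_offset, result_stride,
 * result_count, config; 1.xyz = fence_offset, pair_stride, pair_count. */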
1443
1444 if (!rctx->query_result_shader) {
1445 r600_create_query_result_shader(rctx);
1446 if (!rctx->query_result_shader)
1447 return;
1448 }
1449
1450 if (query->buffer.previous) {
1451 u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
1452 &tmp_buffer_offset, &tmp_buffer);
1453 if (!tmp_buffer)
1454 return;
1455 }
1456
1457 rctx->save_qbo_state(&rctx->b, &saved_state);
1458
1459 r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
1460 consts.end_offset = params.end_offset - params.start_offset;
1461 consts.fence_offset = params.fence_offset - params.start_offset;
1462 consts.result_stride = query->result_size;
1463 consts.pair_stride = params.pair_stride;
1464 consts.pair_count = params.pair_count;
1465
1466 constant_buffer.buffer_size = sizeof(consts);
1467 constant_buffer.user_buffer = &consts;
1468
1469 ssbo[1].buffer = tmp_buffer;
1470 ssbo[1].buffer_offset = tmp_buffer_offset;
1471 ssbo[1].buffer_size = 16;
1472
1473 ssbo[2] = ssbo[1];
1474
1475 rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);
1476
1477 grid.block[0] = 1;
1478 grid.block[1] = 1;
1479 grid.block[2] = 1;
1480 grid.grid[0] = 1;
1481 grid.grid[1] = 1;
1482 grid.grid[2] = 1;
1483
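/* Build the config bit field consumed by the shader (see the layout
 * comment above): 4 = write result availability, 8 = boolean result,
 * 32 = timestamp conversion, 64/128 = 64-bit / signed 32-bit store. */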
1484 consts.config = 0;
1485 if (index < 0)
1486 consts.config |= 4;
1487 if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1488 query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
1489 consts.config |= 8;
1490 else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
1491 query->b.type == PIPE_QUERY_TIME_ELAPSED)
1492 consts.config |= 32;
1493
1494 switch (result_type) {
1495 case PIPE_QUERY_TYPE_U64:
1496 case PIPE_QUERY_TYPE_I64:
1497 consts.config |= 64;
1498 break;
1499 case PIPE_QUERY_TYPE_I32:
1500 consts.config |= 128;
1501 break;
1502 case PIPE_QUERY_TYPE_U32:
1503 break;
1504 }
1505
1506 rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;
1507
1508 for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
1509 if (query->b.type != PIPE_QUERY_TIMESTAMP) {
1510 qbuf_prev = qbuf->previous;
1511 consts.result_count = qbuf->results_end / query->result_size;
1512 consts.config &= ~3;
1513 if (qbuf != &query->buffer)
1514 consts.config |= 1;
1515 if (qbuf->previous)
1516 consts.config |= 2;
1517 } else {
1518 /* Only read the last timestamp. */
1519 qbuf_prev = NULL;
1520 consts.result_count = 0;
1521 consts.config |= 16;
1522 params.start_offset += qbuf->results_end - query->result_size;
1523 }
1524
1525 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
1526
1527 ssbo[0].buffer = &qbuf->buf->b.b;
1528 ssbo[0].buffer_offset = params.start_offset;
1529 ssbo[0].buffer_size = qbuf->results_end - params.start_offset;
1530
1531 if (!qbuf->previous) {
1532 ssbo[2].buffer = resource;
1533 ssbo[2].buffer_offset = offset;
1534 ssbo[2].buffer_size = 8;
1535
1536 ((struct r600_resource *)resource)->TC_L2_dirty = true;
1537 }
1538
1539 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
1540
1541 if (wait && qbuf == &query->buffer) {
1542 uint64_t va;
1543
1544 /* Wait for result availability. Wait only for readiness
1545 * of the last entry, since the fence writes should be
1546 * serialized in the CP.
1547 */
1548 va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
1549 va += params.fence_offset;
1550
1551 r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
1552 }
1553
1554 rctx->b.launch_grid(&rctx->b, &grid);
1555 rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
1556 }
1557
1558 r600_restore_qbo_state(rctx, &saved_state);
1559 pipe_resource_reference(&tmp_buffer, NULL);
1560 }
1561
1562 static void r600_render_condition(struct pipe_context *ctx,
1563 struct pipe_query *query,
1564 boolean condition,
1565 uint mode)
1566 {
1567 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1568 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
1569 struct r600_query_buffer *qbuf;
1570 struct r600_atom *atom = &rctx->render_cond_atom;
1571
1572 rctx->render_cond = query;
1573 rctx->render_cond_invert = condition;
1574 rctx->render_cond_mode = mode;
1575
1576 /* Compute the size of SET_PREDICATION packets. */
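/* 5 dwords per results block: a 3-dword SET_PREDICATION packet plus a
 * 2-dword relocation (assumption based on r600_emit_query_predication). */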
1577 atom->num_dw = 0;
1578 if (query) {
1579 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
1580 atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
1581 }
1582
1583 rctx->set_atom_dirty(rctx, atom, query != NULL);
1584 }
1585
1586 void r600_suspend_queries(struct r600_common_context *ctx)
1587 {
1588 struct r600_query_hw *query;
1589
1590 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1591 r600_query_hw_emit_stop(ctx, query);
1592 }
1593 assert(ctx->num_cs_dw_queries_suspend == 0);
1594 }
1595
1596 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
1597 struct list_head *query_list)
1598 {
1599 struct r600_query_hw *query;
1600 unsigned num_dw = 0;
1601
1602 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1603 /* begin + end */
1604 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1605
1606 /* Workaround for the fact that
1607 * num_cs_dw_queries_suspend is incremented for every
1608 * resumed query, which raises the bar in need_cs_space for
1609 * queries about to be resumed.
1610 */
1611 num_dw += query->num_cs_dw_end;
1612 }
1613 /* primitives generated query */
1614 num_dw += ctx->streamout.enable_atom.num_dw;
1615 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
1616 num_dw += 13;
1617
1618 return num_dw;
1619 }
1620
1621 void r600_resume_queries(struct r600_common_context *ctx)
1622 {
1623 struct r600_query_hw *query;
1624 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
1625
1626 assert(ctx->num_cs_dw_queries_suspend == 0);
1627
1628 /* Check CS space here. Resuming must not be interrupted by flushes. */
1629 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
1630
1631 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1632 r600_query_hw_emit_start(ctx, query);
1633 }
1634 }
1635
1636 /* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
1637 void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
1638 {
1639 struct r600_common_context *ctx =
1640 (struct r600_common_context*)rscreen->aux_context;
1641 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1642 struct r600_resource *buffer;
1643 uint32_t *results;
1644 unsigned i, mask = 0;
1645 unsigned max_rbs = ctx->screen->info.num_render_backends;
1646
1647 assert(rscreen->chip_class <= CAYMAN);
1648
1649 /* if backend_map query is supported by the kernel */
1650 if (rscreen->info.r600_gb_backend_map_valid) {
1651 unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
1652 unsigned backend_map = rscreen->info.r600_gb_backend_map;
1653 unsigned item_width, item_mask;
1654
1655 if (ctx->chip_class >= EVERGREEN) {
1656 item_width = 4;
1657 item_mask = 0x7;
1658 } else {
1659 item_width = 2;
1660 item_mask = 0x3;
1661 }
1662
1663 while (num_tile_pipes--) {
1664 i = backend_map & item_mask;
1665 mask |= (1<<i);
1666 backend_map >>= item_width;
1667 }
1668 if (mask != 0) {
1669 rscreen->info.enabled_rb_mask = mask;
1670 return;
1671 }
1672 }
1673
1674 /* otherwise, fall back to probing the RBs with a ZPASS_DONE event on older kernels */
1675
1676 /* create buffer for event data */
1677 buffer = (struct r600_resource*)
1678 pipe_buffer_create(ctx->b.screen, 0,
1679 PIPE_USAGE_STAGING, max_rbs * 16);
1680 if (!buffer)
1681 return;
1682
1683 /* initialize buffer with zeroes */
1684 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
1685 if (results) {
1686 memset(results, 0, max_rbs * 4 * 4);
1687
1688 /* emit EVENT_WRITE for ZPASS_DONE */
1689 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1690 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
1691 radeon_emit(cs, buffer->gpu_address);
1692 radeon_emit(cs, buffer->gpu_address >> 32);
1693
1694 r600_emit_reloc(ctx, &ctx->gfx, buffer,
1695 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
1696
1697 /* analyze results */
1698 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
1699 if (results) {
1700 for(i = 0; i < max_rbs; i++) {
1701 /* at least the highest bit will be set if the backend is used */
1702 if (results[i*4 + 1])
1703 mask |= (1<<i);
1704 }
1705 }
1706 }
1707
1708 r600_resource_reference(&buffer, NULL);
1709
1710 if (mask)
1711 rscreen->info.enabled_rb_mask = mask;
1712 }
1713
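/* Table of driver-specific HUD queries exposed through
 * pipe_driver_query_info: X() adds an ungrouped entry, XG() adds one that
 * belongs to a query group. */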
1714 #define XFULL(name_, query_type_, type_, result_type_, group_id_) \
1715 { \
1716 .name = name_, \
1717 .query_type = R600_QUERY_##query_type_, \
1718 .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
1719 .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
1720 .group_id = group_id_ \
1721 }
1722
1723 #define X(name_, query_type_, type_, result_type_) \
1724 XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
1725
1726 #define XG(group_, name_, query_type_, type_, result_type_) \
1727 XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
1728
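/* For reference, X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) expands to:
 *
 *    { .name = "draw-calls",
 *      .query_type = R600_QUERY_DRAW_CALLS,
 *      .type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *      .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 *      .group_id = ~(unsigned)0 }
 *
 * where ~0 means the query does not belong to any group. */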
1729 static struct pipe_driver_query_info r600_driver_query_list[] = {
1730 X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
1731 X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
1732 X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
1733 X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
1734 X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
1735 X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
1736 X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
1737 X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
1738 X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
1739 X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
1740 X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
1741 X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
1742 X("num-fb-cache-flushes", NUM_FB_CACHE_FLUSHES, UINT64, AVERAGE),
1743 X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
1744 X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
1745 X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
1746 X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
1747 X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
1748 X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
1749 X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
1750 X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
1751 X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
1752 X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
1753 X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
1754 X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
1755 X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
1756 X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
1757 X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
1758 X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
1759
1760 /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
1761 * which use them as a fallback path to detect the GPU type.
1762 *
1763 * Note: The names of these queries are significant for GPUPerfStudio
1764 * (and possibly their order as well). */
1765 XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
1766 XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
1767 XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
1768 XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
1769 XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),
1770
1771 /* The following queries must be at the end of the list because their
1772 * availability is adjusted dynamically based on the DRM version. */
1773 X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
1774 X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
1775 X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
1776 X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
1777 X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
1778 X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
1779 X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
1780 X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
1781 X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
1782 X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
1783 X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
1784 X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
1785 X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
1786 X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
1787 X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
1788 X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
1789 X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
1790 X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
1791 X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
1792 X("GPU-dma-busy", GPU_DMA_BUSY, UINT64, AVERAGE),
1793 X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
1794 X("GPU-ce-busy", GPU_CE_BUSY, UINT64, AVERAGE),
1795
1796 X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
1797 X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
1798 X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
1799 };
1800
1801 #undef X
1802 #undef XG
1803 #undef XFULL
1804
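/* How much of the tail of r600_driver_query_list is unavailable:
 * - radeon, drm 2.42+: everything is available.
 * - amdgpu (drm 3.x) on VI+: the last 3 (temperature and clocks) are not.
 * - amdgpu (drm 3.x) before VI: the last 10, i.e. additionally the 7 CP
 *   queries GPU-pfp-busy .. GPU-ce-busy.
 * - older radeon kernels: the last 25, i.e. everything from GPU-load on. */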
1805 static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
1806 {
1807 if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
1808 return ARRAY_SIZE(r600_driver_query_list);
1809 else if (rscreen->info.drm_major == 3) {
1810 if (rscreen->chip_class >= VI)
1811 return ARRAY_SIZE(r600_driver_query_list) - 3;
1812 else
1813 return ARRAY_SIZE(r600_driver_query_list) - 10;
1814 }
1815 else
1816 return ARRAY_SIZE(r600_driver_query_list) - 25;
1817 }
1818
1819 static int r600_get_driver_query_info(struct pipe_screen *screen,
1820 unsigned index,
1821 struct pipe_driver_query_info *info)
1822 {
1823 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
1824 unsigned num_queries = r600_get_num_queries(rscreen);
1825
1826 if (!info) {
1827 unsigned num_perfcounters =
1828 r600_get_perfcounter_info(rscreen, 0, NULL);
1829
1830 return num_queries + num_perfcounters;
1831 }
1832
1833 if (index >= num_queries)
1834 return r600_get_perfcounter_info(rscreen, index - num_queries, info);
1835
1836 *info = r600_driver_query_list[index];
1837
1838 switch (info->query_type) {
1839 case R600_QUERY_REQUESTED_VRAM:
1840 case R600_QUERY_VRAM_USAGE:
1841 case R600_QUERY_MAPPED_VRAM:
1842 info->max_value.u64 = rscreen->info.vram_size;
1843 break;
1844 case R600_QUERY_REQUESTED_GTT:
1845 case R600_QUERY_GTT_USAGE:
1846 case R600_QUERY_MAPPED_GTT:
1847 info->max_value.u64 = rscreen->info.gart_size;
1848 break;
1849 case R600_QUERY_GPU_TEMPERATURE:
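/* the value is reported in degrees Celsius; 125 is a sane upper bound */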
1850 info->max_value.u64 = 125;
1851 break;
1852 case R600_QUERY_VRAM_VIS_USAGE:
1853 info->max_value.u64 = rscreen->info.vram_vis_size;
1854 break;
1855 }
1856
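/* Perfcounter groups occupy the group indices [0, num_groups), so the
 * group_id of the software query groups (GPIN) is shifted past them,
 * matching the ordering in r600_get_driver_query_group_info below. */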
1857 if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
1858 info->group_id += rscreen->perfcounters->num_groups;
1859
1860 return 1;
1861 }
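/* Enumeration sketch (hypothetical frontend code, e.g. what the HUD
 * does): a NULL info pointer returns the total count, then each index
 * is queried one by one:
 *
 *    struct pipe_driver_query_info info;
 *    int n = screen->get_driver_query_info(screen, 0, NULL);
 *    for (int i = 0; i < n; i++) {
 *       screen->get_driver_query_info(screen, i, &info);
 *       // info.name, info.query_type, ... describe one query
 *    }
 */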
1862
1863 /* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
1864 * performance counter groups, so be careful when changing this and related
1865 * functions.
1866 */
1867 static int r600_get_driver_query_group_info(struct pipe_screen *screen,
1868 unsigned index,
1869 struct pipe_driver_query_group_info *info)
1870 {
1871 struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
1872 unsigned num_pc_groups = 0;
1873
1874 if (rscreen->perfcounters)
1875 num_pc_groups = rscreen->perfcounters->num_groups;
1876
1877 if (!info)
1878 return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
1879
1880 if (index < num_pc_groups)
1881 return r600_get_perfcounter_group_info(rscreen, index, info);
1882
1883 index -= num_pc_groups;
1884 if (index >= R600_NUM_SW_QUERY_GROUPS)
1885 return 0;
1886
1887 info->name = "GPIN";
1888 info->max_active_queries = 5;
1889 info->num_queries = 5;
1890 return 1;
1891 }
1892
1893 void r600_query_init(struct r600_common_context *rctx)
1894 {
1895 rctx->b.create_query = r600_create_query;
1896 rctx->b.create_batch_query = r600_create_batch_query;
1897 rctx->b.destroy_query = r600_destroy_query;
1898 rctx->b.begin_query = r600_begin_query;
1899 rctx->b.end_query = r600_end_query;
1900 rctx->b.get_query_result = r600_get_query_result;
1901 rctx->b.get_query_result_resource = r600_get_query_result_resource;
1902 rctx->render_cond_atom.emit = r600_emit_query_predication;
1903
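/* Render condition is implemented with occlusion-query predication,
 * which needs the render backends, so it is only exposed when RBs are
 * reported (presumably always the case on real hardware). */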
1904 if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
1905 rctx->b.render_condition = r600_render_condition;
1906
1907 LIST_INITHEAD(&rctx->active_queries);
1908 }
1909
1910 void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
1911 {
1912 rscreen->b.get_driver_query_info = r600_get_driver_query_info;
1913 rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
1914 }