gallium/radeon: add VRAM-vis-usage HUD query
[mesa.git] / src / gallium / drivers / radeon / r600_query.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "r600_query.h"
26 #include "r600_cs.h"
27 #include "util/u_memory.h"
28 #include "util/u_upload_mgr.h"
29
30 #include "tgsi/tgsi_text.h"
31
32 struct r600_hw_query_params {
33 unsigned start_offset;
34 unsigned end_offset;
35 unsigned fence_offset;
36 unsigned pair_stride;
37 unsigned pair_count;
38 };
39
40 /* Queries without buffer handling or suspend/resume. */
41 struct r600_query_sw {
42 struct r600_query b;
43
44 uint64_t begin_result;
45 uint64_t end_result;
46 /* Fence for GPU_FINISHED. */
47 struct pipe_fence_handle *fence;
48 };
49
50 static void r600_query_sw_destroy(struct r600_common_context *rctx,
51 struct r600_query *rquery)
52 {
53 struct pipe_screen *screen = rctx->b.screen;
54 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
55
56 screen->fence_reference(screen, &query->fence, NULL);
57 FREE(query);
58 }
59
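/* Map a driver-specific query type to the winsys value that backs it.
 * These queries are sampled through radeon_winsys::query_value; e.g. the
 * new R600_QUERY_VRAM_VIS_USAGE reads RADEON_VRAM_VIS_USAGE, the amount
 * of CPU-visible (BAR-mapped) VRAM currently in use. */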
60 static enum radeon_value_id winsys_id_from_type(unsigned type)
61 {
62 switch (type) {
63 case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
64 case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
65 case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
66 case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
67 case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
68 case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
69 case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
70 case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
71 case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
72 case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
73 case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
74 case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
75 case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
76 case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
77 case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
78 case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
79 default: unreachable("query type does not correspond to winsys id");
80 }
81 }
82
83 static bool r600_query_sw_begin(struct r600_common_context *rctx,
84 struct r600_query *rquery)
85 {
86 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
87
88 switch(query->b.type) {
89 case PIPE_QUERY_TIMESTAMP_DISJOINT:
90 case PIPE_QUERY_GPU_FINISHED:
91 break;
92 case R600_QUERY_DRAW_CALLS:
93 query->begin_result = rctx->num_draw_calls;
94 break;
95 case R600_QUERY_SPILL_DRAW_CALLS:
96 query->begin_result = rctx->num_spill_draw_calls;
97 break;
98 case R600_QUERY_COMPUTE_CALLS:
99 query->begin_result = rctx->num_compute_calls;
100 break;
101 case R600_QUERY_SPILL_COMPUTE_CALLS:
102 query->begin_result = rctx->num_spill_compute_calls;
103 break;
104 case R600_QUERY_DMA_CALLS:
105 query->begin_result = rctx->num_dma_calls;
106 break;
107 case R600_QUERY_CP_DMA_CALLS:
108 query->begin_result = rctx->num_cp_dma_calls;
109 break;
110 case R600_QUERY_NUM_VS_FLUSHES:
111 query->begin_result = rctx->num_vs_flushes;
112 break;
113 case R600_QUERY_NUM_PS_FLUSHES:
114 query->begin_result = rctx->num_ps_flushes;
115 break;
116 case R600_QUERY_NUM_CS_FLUSHES:
117 query->begin_result = rctx->num_cs_flushes;
118 break;
119 case R600_QUERY_NUM_FB_CACHE_FLUSHES:
120 query->begin_result = rctx->num_fb_cache_flushes;
121 break;
122 case R600_QUERY_NUM_L2_INVALIDATES:
123 query->begin_result = rctx->num_L2_invalidates;
124 break;
125 case R600_QUERY_NUM_L2_WRITEBACKS:
126 query->begin_result = rctx->num_L2_writebacks;
127 break;
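/* These queries are instantaneous rather than cumulative: begin_result is
 * left at zero so that end_result - begin_result yields the value sampled
 * in r600_query_sw_end(). */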
128 case R600_QUERY_REQUESTED_VRAM:
129 case R600_QUERY_REQUESTED_GTT:
130 case R600_QUERY_MAPPED_VRAM:
131 case R600_QUERY_MAPPED_GTT:
132 case R600_QUERY_VRAM_USAGE:
133 case R600_QUERY_VRAM_VIS_USAGE:
134 case R600_QUERY_GTT_USAGE:
135 case R600_QUERY_GPU_TEMPERATURE:
136 case R600_QUERY_CURRENT_GPU_SCLK:
137 case R600_QUERY_CURRENT_GPU_MCLK:
138 case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
139 case R600_QUERY_NUM_MAPPED_BUFFERS:
140 query->begin_result = 0;
141 break;
142 case R600_QUERY_BUFFER_WAIT_TIME:
143 case R600_QUERY_NUM_GFX_IBS:
144 case R600_QUERY_NUM_SDMA_IBS:
145 case R600_QUERY_NUM_BYTES_MOVED:
146 case R600_QUERY_NUM_EVICTIONS: {
147 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
148 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
149 break;
150 }
151 case R600_QUERY_GPU_LOAD:
152 case R600_QUERY_GPU_SHADERS_BUSY:
153 case R600_QUERY_GPU_TA_BUSY:
154 case R600_QUERY_GPU_GDS_BUSY:
155 case R600_QUERY_GPU_VGT_BUSY:
156 case R600_QUERY_GPU_IA_BUSY:
157 case R600_QUERY_GPU_SX_BUSY:
158 case R600_QUERY_GPU_WD_BUSY:
159 case R600_QUERY_GPU_BCI_BUSY:
160 case R600_QUERY_GPU_SC_BUSY:
161 case R600_QUERY_GPU_PA_BUSY:
162 case R600_QUERY_GPU_DB_BUSY:
163 case R600_QUERY_GPU_CP_BUSY:
164 case R600_QUERY_GPU_CB_BUSY:
165 query->begin_result = r600_begin_counter(rctx->screen,
166 query->b.type);
167 break;
168 case R600_QUERY_NUM_COMPILATIONS:
169 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
170 break;
171 case R600_QUERY_NUM_SHADERS_CREATED:
172 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
173 break;
174 case R600_QUERY_NUM_SHADER_CACHE_HITS:
175 query->begin_result =
176 p_atomic_read(&rctx->screen->num_shader_cache_hits);
177 break;
178 case R600_QUERY_GPIN_ASIC_ID:
179 case R600_QUERY_GPIN_NUM_SIMD:
180 case R600_QUERY_GPIN_NUM_RB:
181 case R600_QUERY_GPIN_NUM_SPI:
182 case R600_QUERY_GPIN_NUM_SE:
183 break;
184 default:
185 unreachable("r600_query_sw_begin: bad query type");
186 }
187
188 return true;
189 }
190
191 static bool r600_query_sw_end(struct r600_common_context *rctx,
192 struct r600_query *rquery)
193 {
194 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
195
196 switch(query->b.type) {
197 case PIPE_QUERY_TIMESTAMP_DISJOINT:
198 break;
199 case PIPE_QUERY_GPU_FINISHED:
200 rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
201 break;
202 case R600_QUERY_DRAW_CALLS:
203 query->end_result = rctx->num_draw_calls;
204 break;
205 case R600_QUERY_SPILL_DRAW_CALLS:
206 query->end_result = rctx->num_spill_draw_calls;
207 break;
208 case R600_QUERY_COMPUTE_CALLS:
209 query->end_result = rctx->num_compute_calls;
210 break;
211 case R600_QUERY_SPILL_COMPUTE_CALLS:
212 query->end_result = rctx->num_spill_compute_calls;
213 break;
214 case R600_QUERY_DMA_CALLS:
215 query->end_result = rctx->num_dma_calls;
216 break;
217 case R600_QUERY_CP_DMA_CALLS:
218 query->end_result = rctx->num_cp_dma_calls;
219 break;
220 case R600_QUERY_NUM_VS_FLUSHES:
221 query->end_result = rctx->num_vs_flushes;
222 break;
223 case R600_QUERY_NUM_PS_FLUSHES:
224 query->end_result = rctx->num_ps_flushes;
225 break;
226 case R600_QUERY_NUM_CS_FLUSHES:
227 query->end_result = rctx->num_cs_flushes;
228 break;
229 case R600_QUERY_NUM_FB_CACHE_FLUSHES:
230 query->end_result = rctx->num_fb_cache_flushes;
231 break;
232 case R600_QUERY_NUM_L2_INVALIDATES:
233 query->end_result = rctx->num_L2_invalidates;
234 break;
235 case R600_QUERY_NUM_L2_WRITEBACKS:
236 query->end_result = rctx->num_L2_writebacks;
237 break;
238 case R600_QUERY_REQUESTED_VRAM:
239 case R600_QUERY_REQUESTED_GTT:
240 case R600_QUERY_MAPPED_VRAM:
241 case R600_QUERY_MAPPED_GTT:
242 case R600_QUERY_VRAM_USAGE:
243 case R600_QUERY_VRAM_VIS_USAGE:
244 case R600_QUERY_GTT_USAGE:
245 case R600_QUERY_GPU_TEMPERATURE:
246 case R600_QUERY_CURRENT_GPU_SCLK:
247 case R600_QUERY_CURRENT_GPU_MCLK:
248 case R600_QUERY_BUFFER_WAIT_TIME:
249 case R600_QUERY_NUM_MAPPED_BUFFERS:
250 case R600_QUERY_NUM_GFX_IBS:
251 case R600_QUERY_NUM_SDMA_IBS:
252 case R600_QUERY_NUM_BYTES_MOVED:
253 case R600_QUERY_NUM_EVICTIONS: {
254 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
255 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
256 break;
257 }
258 case R600_QUERY_GPU_LOAD:
259 case R600_QUERY_GPU_SHADERS_BUSY:
260 case R600_QUERY_GPU_TA_BUSY:
261 case R600_QUERY_GPU_GDS_BUSY:
262 case R600_QUERY_GPU_VGT_BUSY:
263 case R600_QUERY_GPU_IA_BUSY:
264 case R600_QUERY_GPU_SX_BUSY:
265 case R600_QUERY_GPU_WD_BUSY:
266 case R600_QUERY_GPU_BCI_BUSY:
267 case R600_QUERY_GPU_SC_BUSY:
268 case R600_QUERY_GPU_PA_BUSY:
269 case R600_QUERY_GPU_DB_BUSY:
270 case R600_QUERY_GPU_CP_BUSY:
271 case R600_QUERY_GPU_CB_BUSY:
272 query->end_result = r600_end_counter(rctx->screen,
273 query->b.type,
274 query->begin_result);
275 query->begin_result = 0;
276 break;
277 case R600_QUERY_NUM_COMPILATIONS:
278 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
279 break;
280 case R600_QUERY_NUM_SHADERS_CREATED:
281 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
282 break;
283 case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
284 query->end_result = rctx->last_tex_ps_draw_ratio;
285 break;
286 case R600_QUERY_NUM_SHADER_CACHE_HITS:
287 query->end_result =
288 p_atomic_read(&rctx->screen->num_shader_cache_hits);
289 break;
290 case R600_QUERY_GPIN_ASIC_ID:
291 case R600_QUERY_GPIN_NUM_SIMD:
292 case R600_QUERY_GPIN_NUM_RB:
293 case R600_QUERY_GPIN_NUM_SPI:
294 case R600_QUERY_GPIN_NUM_SE:
295 break;
296 default:
297 unreachable("r600_query_sw_end: bad query type");
298 }
299
300 return true;
301 }
302
303 static bool r600_query_sw_get_result(struct r600_common_context *rctx,
304 struct r600_query *rquery,
305 bool wait,
306 union pipe_query_result *result)
307 {
308 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
309
310 switch (query->b.type) {
311 case PIPE_QUERY_TIMESTAMP_DISJOINT:
312 /* Convert from cycles per millisecond to cycles per second (Hz). */
313 result->timestamp_disjoint.frequency =
314 (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
315 result->timestamp_disjoint.disjoint = false;
316 return true;
317 case PIPE_QUERY_GPU_FINISHED: {
318 struct pipe_screen *screen = rctx->b.screen;
319 result->b = screen->fence_finish(screen, &rctx->b, query->fence,
320 wait ? PIPE_TIMEOUT_INFINITE : 0);
321 return result->b;
322 }
323
324 case R600_QUERY_GPIN_ASIC_ID:
325 result->u32 = 0;
326 return true;
327 case R600_QUERY_GPIN_NUM_SIMD:
328 result->u32 = rctx->screen->info.num_good_compute_units;
329 return true;
330 case R600_QUERY_GPIN_NUM_RB:
331 result->u32 = rctx->screen->info.num_render_backends;
332 return true;
333 case R600_QUERY_GPIN_NUM_SPI:
334 result->u32 = 1; /* all supported chips have one SPI per SE */
335 return true;
336 case R600_QUERY_GPIN_NUM_SE:
337 result->u32 = rctx->screen->info.max_se;
338 return true;
339 }
340
341 result->u64 = query->end_result - query->begin_result;
342
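/* Scale winsys units to what the HUD expects: buffer-wait time arrives in
 * ns (reported as microseconds), the temperature in millidegrees Celsius
 * (reported as degrees), and the clocks in MHz (reported as Hz). */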
343 switch (query->b.type) {
344 case R600_QUERY_BUFFER_WAIT_TIME:
345 case R600_QUERY_GPU_TEMPERATURE:
346 result->u64 /= 1000;
347 break;
348 case R600_QUERY_CURRENT_GPU_SCLK:
349 case R600_QUERY_CURRENT_GPU_MCLK:
350 result->u64 *= 1000000;
351 break;
352 }
353
354 return true;
355 }
356
357
358 static struct r600_query_ops sw_query_ops = {
359 .destroy = r600_query_sw_destroy,
360 .begin = r600_query_sw_begin,
361 .end = r600_query_sw_end,
362 .get_result = r600_query_sw_get_result,
363 .get_result_resource = NULL
364 };
365
366 static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
367 unsigned query_type)
368 {
369 struct r600_query_sw *query;
370
371 query = CALLOC_STRUCT(r600_query_sw);
372 if (!query)
373 return NULL;
374
375 query->b.type = query_type;
376 query->b.ops = &sw_query_ops;
377
378 return (struct pipe_query *)query;
379 }
380
381 void r600_query_hw_destroy(struct r600_common_context *rctx,
382 struct r600_query *rquery)
383 {
384 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
385 struct r600_query_buffer *prev = query->buffer.previous;
386
387 /* Release all query buffers. */
388 while (prev) {
389 struct r600_query_buffer *qbuf = prev;
390 prev = prev->previous;
391 r600_resource_reference(&qbuf->buf, NULL);
392 FREE(qbuf);
393 }
394
395 r600_resource_reference(&query->buffer.buf, NULL);
396 FREE(rquery);
397 }
398
399 static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
400 struct r600_query_hw *query)
401 {
402 unsigned buf_size = MAX2(query->result_size,
403 ctx->screen->info.min_alloc_size);
404
405 /* Queries are normally read by the CPU after
406 * being written by the GPU, hence staging is probably a good
407 * usage pattern.
408 */
409 struct r600_resource *buf = (struct r600_resource*)
410 pipe_buffer_create(ctx->b.screen, 0,
411 PIPE_USAGE_STAGING, buf_size);
412 if (!buf)
413 return NULL;
414
415 if (!query->ops->prepare_buffer(ctx, query, buf)) {
416 r600_resource_reference(&buf, NULL);
417 return NULL;
418 }
419
420 return buf;
421 }
422
423 static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
424 struct r600_query_hw *query,
425 struct r600_resource *buffer)
426 {
427 /* Callers ensure that the buffer is currently unused by the GPU. */
428 uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
429 PIPE_TRANSFER_WRITE |
430 PIPE_TRANSFER_UNSYNCHRONIZED);
431 if (!results)
432 return false;
433
434 memset(results, 0, buffer->b.b.width0);
435
436 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
437 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
438 unsigned num_results;
439 unsigned i, j;
440
441 /* Set top bits for unused backends. */
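/* Each enabled DB writes a {begin, end} pair of 64-bit counters and sets
 * bit 63 of each value once the count is valid. Disabled backends never
 * write, so pre-mark their slots as valid (with a zero count) here to keep
 * the status-bit test in r600_query_read_result() from failing forever. */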
442 num_results = buffer->b.b.width0 / query->result_size;
443 for (j = 0; j < num_results; j++) {
444 for (i = 0; i < ctx->max_db; i++) {
445 if (!(ctx->backend_mask & (1<<i))) {
446 results[(i * 4)+1] = 0x80000000;
447 results[(i * 4)+3] = 0x80000000;
448 }
449 }
450 results += 4 * ctx->max_db;
451 }
452 }
453
454 return true;
455 }
456
457 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
458 struct r600_query *rquery,
459 bool wait,
460 enum pipe_query_value_type result_type,
461 int index,
462 struct pipe_resource *resource,
463 unsigned offset);
464
465 static struct r600_query_ops query_hw_ops = {
466 .destroy = r600_query_hw_destroy,
467 .begin = r600_query_hw_begin,
468 .end = r600_query_hw_end,
469 .get_result = r600_query_hw_get_result,
470 .get_result_resource = r600_query_hw_get_result_resource,
471 };
472
473 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
474 struct r600_query_hw *query,
475 struct r600_resource *buffer,
476 uint64_t va);
477 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
478 struct r600_query_hw *query,
479 struct r600_resource *buffer,
480 uint64_t va);
481 static void r600_query_hw_add_result(struct r600_common_context *ctx,
482 struct r600_query_hw *, void *buffer,
483 union pipe_query_result *result);
484 static void r600_query_hw_clear_result(struct r600_query_hw *,
485 union pipe_query_result *);
486
487 static struct r600_query_hw_ops query_hw_default_hw_ops = {
488 .prepare_buffer = r600_query_hw_prepare_buffer,
489 .emit_start = r600_query_hw_do_emit_start,
490 .emit_stop = r600_query_hw_do_emit_stop,
491 .clear_result = r600_query_hw_clear_result,
492 .add_result = r600_query_hw_add_result,
493 };
494
495 bool r600_query_hw_init(struct r600_common_context *rctx,
496 struct r600_query_hw *query)
497 {
498 query->buffer.buf = r600_new_query_buffer(rctx, query);
499 if (!query->buffer.buf)
500 return false;
501
502 return true;
503 }
504
505 static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
506 unsigned query_type,
507 unsigned index)
508 {
509 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
510 if (!query)
511 return NULL;
512
513 query->b.type = query_type;
514 query->b.ops = &query_hw_ops;
515 query->ops = &query_hw_default_hw_ops;
516
517 switch (query_type) {
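/* result_size below is the footprint of one begin/end sample set in the
 * query buffer. As a sketch, occlusion queries on a chip with max_db = 8
 * take 8 * 16 = 128 bytes of ZPASS pairs plus 16 bytes for the fence and
 * alignment, i.e. 144 bytes per sample. */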
518 case PIPE_QUERY_OCCLUSION_COUNTER:
519 case PIPE_QUERY_OCCLUSION_PREDICATE:
520 query->result_size = 16 * rctx->max_db;
521 query->result_size += 16; /* for the fence + alignment */
522 query->num_cs_dw_begin = 6;
523 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
524 break;
525 case PIPE_QUERY_TIME_ELAPSED:
526 query->result_size = 24;
527 query->num_cs_dw_begin = 8;
528 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
529 break;
530 case PIPE_QUERY_TIMESTAMP:
531 query->result_size = 16;
532 query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
533 query->flags = R600_QUERY_HW_FLAG_NO_START;
534 break;
535 case PIPE_QUERY_PRIMITIVES_EMITTED:
536 case PIPE_QUERY_PRIMITIVES_GENERATED:
537 case PIPE_QUERY_SO_STATISTICS:
538 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
539 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
540 query->result_size = 32;
541 query->num_cs_dw_begin = 6;
542 query->num_cs_dw_end = 6;
543 query->stream = index;
544 break;
545 case PIPE_QUERY_PIPELINE_STATISTICS:
546 /* 11 values on EG, 8 on R600. */
547 query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
548 query->result_size += 8; /* for the fence + alignment */
549 query->num_cs_dw_begin = 6;
550 query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
551 break;
552 default:
553 assert(0);
554 FREE(query);
555 return NULL;
556 }
557
558 if (!r600_query_hw_init(rctx, query)) {
559 FREE(query);
560 return NULL;
561 }
562
563 return (struct pipe_query *)query;
564 }
565
566 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
567 unsigned type, int diff)
568 {
569 if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
570 type == PIPE_QUERY_OCCLUSION_PREDICATE) {
571 bool old_enable = rctx->num_occlusion_queries != 0;
572 bool old_perfect_enable =
573 rctx->num_perfect_occlusion_queries != 0;
574 bool enable, perfect_enable;
575
576 rctx->num_occlusion_queries += diff;
577 assert(rctx->num_occlusion_queries >= 0);
578
579 if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
580 rctx->num_perfect_occlusion_queries += diff;
581 assert(rctx->num_perfect_occlusion_queries >= 0);
582 }
583
584 enable = rctx->num_occlusion_queries != 0;
585 perfect_enable = rctx->num_perfect_occlusion_queries != 0;
586
587 if (enable != old_enable || perfect_enable != old_perfect_enable) {
588 rctx->set_occlusion_query_state(&rctx->b, enable);
589 }
590 }
591 }
592
593 static unsigned event_type_for_stream(struct r600_query_hw *query)
594 {
595 switch (query->stream) {
596 default:
597 case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
598 case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
599 case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
600 case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
601 }
602 }
603
604 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
605 struct r600_query_hw *query,
606 struct r600_resource *buffer,
607 uint64_t va)
608 {
609 struct radeon_winsys_cs *cs = ctx->gfx.cs;
610
611 switch (query->b.type) {
612 case PIPE_QUERY_OCCLUSION_COUNTER:
613 case PIPE_QUERY_OCCLUSION_PREDICATE:
614 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
615 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
616 radeon_emit(cs, va);
617 radeon_emit(cs, (va >> 32) & 0xFFFF);
618 break;
619 case PIPE_QUERY_PRIMITIVES_EMITTED:
620 case PIPE_QUERY_PRIMITIVES_GENERATED:
621 case PIPE_QUERY_SO_STATISTICS:
622 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
623 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
624 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
625 radeon_emit(cs, va);
626 radeon_emit(cs, (va >> 32) & 0xFFFF);
627 break;
628 case PIPE_QUERY_TIME_ELAPSED:
629 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
630 0, 3, NULL, va, 0, 0);
631 break;
632 case PIPE_QUERY_PIPELINE_STATISTICS:
633 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
634 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
635 radeon_emit(cs, va);
636 radeon_emit(cs, (va >> 32) & 0xFFFF);
637 break;
638 default:
639 assert(0);
640 }
641 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
642 RADEON_PRIO_QUERY);
643 }
644
645 static void r600_query_hw_emit_start(struct r600_common_context *ctx,
646 struct r600_query_hw *query)
647 {
648 uint64_t va;
649
650 if (!query->buffer.buf)
651 return; // previous buffer allocation failure
652
653 r600_update_occlusion_query_state(ctx, query->b.type, 1);
654 r600_update_prims_generated_query_state(ctx, query->b.type, 1);
655
656 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
657 true);
658
659 /* Get a new query buffer if needed. */
660 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
661 struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
662 *qbuf = query->buffer;
663 query->buffer.results_end = 0;
664 query->buffer.previous = qbuf;
665 query->buffer.buf = r600_new_query_buffer(ctx, query);
666 if (!query->buffer.buf)
667 return;
668 }
669
670 /* emit begin query */
671 va = query->buffer.buf->gpu_address + query->buffer.results_end;
672
673 query->ops->emit_start(ctx, query, query->buffer.buf, va);
674
675 ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
676 }
677
678 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
679 struct r600_query_hw *query,
680 struct r600_resource *buffer,
681 uint64_t va)
682 {
683 struct radeon_winsys_cs *cs = ctx->gfx.cs;
684 uint64_t fence_va = 0;
685
686 switch (query->b.type) {
687 case PIPE_QUERY_OCCLUSION_COUNTER:
688 case PIPE_QUERY_OCCLUSION_PREDICATE:
689 va += 8;
690 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
691 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
692 radeon_emit(cs, va);
693 radeon_emit(cs, (va >> 32) & 0xFFFF);
694
695 fence_va = va + ctx->max_db * 16 - 8;
696 break;
697 case PIPE_QUERY_PRIMITIVES_EMITTED:
698 case PIPE_QUERY_PRIMITIVES_GENERATED:
699 case PIPE_QUERY_SO_STATISTICS:
700 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
701 va += query->result_size/2;
702 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
703 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
704 radeon_emit(cs, va);
705 radeon_emit(cs, (va >> 32) & 0xFFFF);
706 break;
707 case PIPE_QUERY_TIME_ELAPSED:
708 va += 8;
709 /* fall through */
710 case PIPE_QUERY_TIMESTAMP:
711 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
712 0, 3, NULL, va, 0, 0);
713 fence_va = va + 8;
714 break;
715 case PIPE_QUERY_PIPELINE_STATISTICS: {
716 unsigned sample_size = (query->result_size - 8) / 2;
717
718 va += sample_size;
719 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
720 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
721 radeon_emit(cs, va);
722 radeon_emit(cs, (va >> 32) & 0xFFFF);
723
724 fence_va = va + sample_size;
725 break;
726 }
727 default:
728 assert(0);
729 }
730 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
731 RADEON_PRIO_QUERY);
732
733 if (fence_va)
734 r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
735 query->buffer.buf, fence_va, 0, 0x80000000);
736 }
737
738 static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
739 struct r600_query_hw *query)
740 {
741 uint64_t va;
742
743 if (!query->buffer.buf)
744 return; // previous buffer allocation failure
745
746 /* Queries that need a begin already reserved this CS space in begin_query. */
747 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
748 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
749 }
750
751 /* emit end query */
752 va = query->buffer.buf->gpu_address + query->buffer.results_end;
753
754 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
755
756 query->buffer.results_end += query->result_size;
757
758 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
759 ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
760
761 r600_update_occlusion_query_state(ctx, query->b.type, -1);
762 r600_update_prims_generated_query_state(ctx, query->b.type, -1);
763 }
764
765 static void r600_emit_query_predication(struct r600_common_context *ctx,
766 struct r600_atom *atom)
767 {
768 struct radeon_winsys_cs *cs = ctx->gfx.cs;
769 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
770 struct r600_query_buffer *qbuf;
771 uint32_t op;
772 bool flag_wait;
773
774 if (!query)
775 return;
776
777 flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
778 ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
779
780 switch (query->b.type) {
781 case PIPE_QUERY_OCCLUSION_COUNTER:
782 case PIPE_QUERY_OCCLUSION_PREDICATE:
783 op = PRED_OP(PREDICATION_OP_ZPASS);
784 break;
785 case PIPE_QUERY_PRIMITIVES_EMITTED:
786 case PIPE_QUERY_PRIMITIVES_GENERATED:
787 case PIPE_QUERY_SO_STATISTICS:
788 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
789 op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
790 break;
791 default:
792 assert(0);
793 return;
794 }
795
796 /* if true then invert, see GL_ARB_conditional_render_inverted */
797 if (ctx->render_cond_invert)
798 op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
799 else
800 op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */
801
802 op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
803
804 /* emit predicate packets for all data blocks */
805 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
806 unsigned results_base = 0;
807 uint64_t va = qbuf->buf->gpu_address;
808
809 while (results_base < qbuf->results_end) {
810 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
811 radeon_emit(cs, va + results_base);
812 radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
813 r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
814 RADEON_PRIO_QUERY);
815 results_base += query->result_size;
816
817 /* set CONTINUE bit for all packets except the first */
818 op |= PREDICATION_CONTINUE;
819 }
820 }
821 }
822
823 static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
824 {
825 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
826
827 if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
828 query_type == PIPE_QUERY_GPU_FINISHED ||
829 query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
830 return r600_query_sw_create(ctx, query_type);
831
832 return r600_query_hw_create(rctx, query_type, index);
833 }
834
835 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
836 {
837 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
838 struct r600_query *rquery = (struct r600_query *)query;
839
840 rquery->ops->destroy(rctx, rquery);
841 }
842
843 static boolean r600_begin_query(struct pipe_context *ctx,
844 struct pipe_query *query)
845 {
846 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
847 struct r600_query *rquery = (struct r600_query *)query;
848
849 return rquery->ops->begin(rctx, rquery);
850 }
851
852 void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
853 struct r600_query_hw *query)
854 {
855 struct r600_query_buffer *prev = query->buffer.previous;
856
857 /* Discard the old query buffers. */
858 while (prev) {
859 struct r600_query_buffer *qbuf = prev;
860 prev = prev->previous;
861 r600_resource_reference(&qbuf->buf, NULL);
862 FREE(qbuf);
863 }
864
865 query->buffer.results_end = 0;
866 query->buffer.previous = NULL;
867
868 /* Obtain a new buffer if the current one can't be mapped without a stall. */
869 if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
870 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
871 r600_resource_reference(&query->buffer.buf, NULL);
872 query->buffer.buf = r600_new_query_buffer(rctx, query);
873 } else {
874 if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
875 r600_resource_reference(&query->buffer.buf, NULL);
876 }
877 }
878
879 bool r600_query_hw_begin(struct r600_common_context *rctx,
880 struct r600_query *rquery)
881 {
882 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
883
884 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
885 assert(0);
886 return false;
887 }
888
889 if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
890 r600_query_hw_reset_buffers(rctx, query);
891
892 r600_query_hw_emit_start(rctx, query);
893 if (!query->buffer.buf)
894 return false;
895
896 LIST_ADDTAIL(&query->list, &rctx->active_queries);
897 return true;
898 }
899
900 static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
901 {
902 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
903 struct r600_query *rquery = (struct r600_query *)query;
904
905 return rquery->ops->end(rctx, rquery);
906 }
907
908 bool r600_query_hw_end(struct r600_common_context *rctx,
909 struct r600_query *rquery)
910 {
911 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
912
913 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
914 r600_query_hw_reset_buffers(rctx, query);
915
916 r600_query_hw_emit_stop(rctx, query);
917
918 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
919 LIST_DELINIT(&query->list);
920
921 if (!query->buffer.buf)
922 return false;
923
924 return true;
925 }
926
927 static void r600_get_hw_query_params(struct r600_common_context *rctx,
928 struct r600_query_hw *rquery, int index,
929 struct r600_hw_query_params *params)
930 {
931 params->pair_stride = 0;
932 params->pair_count = 1;
933
934 switch (rquery->b.type) {
935 case PIPE_QUERY_OCCLUSION_COUNTER:
936 case PIPE_QUERY_OCCLUSION_PREDICATE:
937 params->start_offset = 0;
938 params->end_offset = 8;
939 params->fence_offset = rctx->max_db * 16;
940 params->pair_stride = 16;
941 params->pair_count = rctx->max_db;
942 break;
943 case PIPE_QUERY_TIME_ELAPSED:
944 params->start_offset = 0;
945 params->end_offset = 8;
946 params->fence_offset = 16;
947 break;
948 case PIPE_QUERY_TIMESTAMP:
949 params->start_offset = 0;
950 params->end_offset = 0;
951 params->fence_offset = 8;
952 break;
953 case PIPE_QUERY_PRIMITIVES_EMITTED:
954 params->start_offset = 8;
955 params->end_offset = 24;
956 params->fence_offset = params->end_offset + 4;
957 break;
958 case PIPE_QUERY_PRIMITIVES_GENERATED:
959 params->start_offset = 0;
960 params->end_offset = 16;
961 params->fence_offset = params->end_offset + 4;
962 break;
963 case PIPE_QUERY_SO_STATISTICS:
964 params->start_offset = 8 - index * 8;
965 params->end_offset = 24 - index * 8;
966 params->fence_offset = params->end_offset + 4;
967 break;
968 case PIPE_QUERY_PIPELINE_STATISTICS:
969 {
970 /* Offsets apply to EG+ */
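/* The 11 EG counters are 64 bits each, so one snapshot occupies
 * 11 * 8 = 88 bytes; the end snapshot follows the begin snapshot,
 * hence end_offset = 88 + start_offset and the fence sits at 2 * 88. */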
971 static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
972 params->start_offset = offsets[index];
973 params->end_offset = 88 + offsets[index];
974 params->fence_offset = 2 * 88;
975 break;
976 }
977 default:
978 unreachable("r600_get_hw_query_params unsupported");
979 }
980 }
981
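/* Read one 64-bit counter from a pair of dword indices and return the
 * end - start delta. When test_status_bit is set, bit 63 of both values
 * must be set (i.e. the GPU marked them valid) for the delta to count. */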
982 static uint64_t r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
983 bool test_status_bit)
984 {
985 uint32_t *current_result = (uint32_t*)map;
986 uint64_t start, end;
987
988 start = (uint64_t)current_result[start_index] |
989 (uint64_t)current_result[start_index+1] << 32;
990 end = (uint64_t)current_result[end_index] |
991 (uint64_t)current_result[end_index+1] << 32;
992
993 if (!test_status_bit ||
994 ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
995 return end - start;
996 }
997 return 0;
998 }
999
1000 static void r600_query_hw_add_result(struct r600_common_context *ctx,
1001 struct r600_query_hw *query,
1002 void *buffer,
1003 union pipe_query_result *result)
1004 {
1005 switch (query->b.type) {
1006 case PIPE_QUERY_OCCLUSION_COUNTER: {
1007 for (unsigned i = 0; i < ctx->max_db; ++i) {
1008 unsigned results_base = i * 16;
1009 result->u64 +=
1010 r600_query_read_result(buffer + results_base, 0, 2, true);
1011 }
1012 break;
1013 }
1014 case PIPE_QUERY_OCCLUSION_PREDICATE: {
1015 for (unsigned i = 0; i < ctx->max_db; ++i) {
1016 unsigned results_base = i * 16;
1017 result->b = result->b ||
1018 r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
1019 }
1020 break;
1021 }
1022 case PIPE_QUERY_TIME_ELAPSED:
1023 result->u64 += r600_query_read_result(buffer, 0, 2, false);
1024 break;
1025 case PIPE_QUERY_TIMESTAMP:
1026 result->u64 = *(uint64_t*)buffer;
1027 break;
1028 case PIPE_QUERY_PRIMITIVES_EMITTED:
1029 /* SAMPLE_STREAMOUTSTATS stores this structure:
1030 * {
1031 * u64 NumPrimitivesWritten;
1032 * u64 PrimitiveStorageNeeded;
1033 * }
1034 * We only need NumPrimitivesWritten here. */
1035 result->u64 += r600_query_read_result(buffer, 2, 6, true);
1036 break;
1037 case PIPE_QUERY_PRIMITIVES_GENERATED:
1038 /* Here we read PrimitiveStorageNeeded. */
1039 result->u64 += r600_query_read_result(buffer, 0, 4, true);
1040 break;
1041 case PIPE_QUERY_SO_STATISTICS:
1042 result->so_statistics.num_primitives_written +=
1043 r600_query_read_result(buffer, 2, 6, true);
1044 result->so_statistics.primitives_storage_needed +=
1045 r600_query_read_result(buffer, 0, 4, true);
1046 break;
1047 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1048 result->b = result->b ||
1049 r600_query_read_result(buffer, 2, 6, true) !=
1050 r600_query_read_result(buffer, 0, 4, true);
1051 break;
1052 case PIPE_QUERY_PIPELINE_STATISTICS:
1053 if (ctx->chip_class >= EVERGREEN) {
1054 result->pipeline_statistics.ps_invocations +=
1055 r600_query_read_result(buffer, 0, 22, false);
1056 result->pipeline_statistics.c_primitives +=
1057 r600_query_read_result(buffer, 2, 24, false);
1058 result->pipeline_statistics.c_invocations +=
1059 r600_query_read_result(buffer, 4, 26, false);
1060 result->pipeline_statistics.vs_invocations +=
1061 r600_query_read_result(buffer, 6, 28, false);
1062 result->pipeline_statistics.gs_invocations +=
1063 r600_query_read_result(buffer, 8, 30, false);
1064 result->pipeline_statistics.gs_primitives +=
1065 r600_query_read_result(buffer, 10, 32, false);
1066 result->pipeline_statistics.ia_primitives +=
1067 r600_query_read_result(buffer, 12, 34, false);
1068 result->pipeline_statistics.ia_vertices +=
1069 r600_query_read_result(buffer, 14, 36, false);
1070 result->pipeline_statistics.hs_invocations +=
1071 r600_query_read_result(buffer, 16, 38, false);
1072 result->pipeline_statistics.ds_invocations +=
1073 r600_query_read_result(buffer, 18, 40, false);
1074 result->pipeline_statistics.cs_invocations +=
1075 r600_query_read_result(buffer, 20, 42, false);
1076 } else {
1077 result->pipeline_statistics.ps_invocations +=
1078 r600_query_read_result(buffer, 0, 16, false);
1079 result->pipeline_statistics.c_primitives +=
1080 r600_query_read_result(buffer, 2, 18, false);
1081 result->pipeline_statistics.c_invocations +=
1082 r600_query_read_result(buffer, 4, 20, false);
1083 result->pipeline_statistics.vs_invocations +=
1084 r600_query_read_result(buffer, 6, 22, false);
1085 result->pipeline_statistics.gs_invocations +=
1086 r600_query_read_result(buffer, 8, 24, false);
1087 result->pipeline_statistics.gs_primitives +=
1088 r600_query_read_result(buffer, 10, 26, false);
1089 result->pipeline_statistics.ia_primitives +=
1090 r600_query_read_result(buffer, 12, 28, false);
1091 result->pipeline_statistics.ia_vertices +=
1092 r600_query_read_result(buffer, 14, 30, false);
1093 }
1094 #if 0 /* for testing */
1095 printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
1096 "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
1097 "Clipper prims=%llu, PS=%llu, CS=%llu\n",
1098 result->pipeline_statistics.ia_vertices,
1099 result->pipeline_statistics.ia_primitives,
1100 result->pipeline_statistics.vs_invocations,
1101 result->pipeline_statistics.hs_invocations,
1102 result->pipeline_statistics.ds_invocations,
1103 result->pipeline_statistics.gs_invocations,
1104 result->pipeline_statistics.gs_primitives,
1105 result->pipeline_statistics.c_invocations,
1106 result->pipeline_statistics.c_primitives,
1107 result->pipeline_statistics.ps_invocations,
1108 result->pipeline_statistics.cs_invocations);
1109 #endif
1110 break;
1111 default:
1112 assert(0);
1113 }
1114 }
1115
1116 static boolean r600_get_query_result(struct pipe_context *ctx,
1117 struct pipe_query *query, boolean wait,
1118 union pipe_query_result *result)
1119 {
1120 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1121 struct r600_query *rquery = (struct r600_query *)query;
1122
1123 return rquery->ops->get_result(rctx, rquery, wait, result);
1124 }
1125
1126 static void r600_get_query_result_resource(struct pipe_context *ctx,
1127 struct pipe_query *query,
1128 boolean wait,
1129 enum pipe_query_value_type result_type,
1130 int index,
1131 struct pipe_resource *resource,
1132 unsigned offset)
1133 {
1134 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1135 struct r600_query *rquery = (struct r600_query *)query;
1136
1137 rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
1138 resource, offset);
1139 }
1140
1141 static void r600_query_hw_clear_result(struct r600_query_hw *query,
1142 union pipe_query_result *result)
1143 {
1144 util_query_clear_result(result, query->b.type);
1145 }
1146
1147 bool r600_query_hw_get_result(struct r600_common_context *rctx,
1148 struct r600_query *rquery,
1149 bool wait, union pipe_query_result *result)
1150 {
1151 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1152 struct r600_query_buffer *qbuf;
1153
1154 query->ops->clear_result(query, result);
1155
1156 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1157 unsigned results_base = 0;
1158 void *map;
1159
1160 map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
1161 PIPE_TRANSFER_READ |
1162 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
1163 if (!map)
1164 return false;
1165
1166 while (results_base != qbuf->results_end) {
1167 query->ops->add_result(rctx, query, map + results_base,
1168 result);
1169 results_base += query->result_size;
1170 }
1171 }
1172
1173 /* Convert the time to expected units. */
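/* clock_crystal_freq is in kHz, so ticks * 1000000 / freq_in_kHz
 * yields nanoseconds, which is what the PIPE_QUERY_TIME* results use. */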
1174 if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
1175 rquery->type == PIPE_QUERY_TIMESTAMP) {
1176 result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
1177 }
1178 return true;
1179 }
1180
1181 /* Create the compute shader that is used to collect the results.
1182 *
1183 * One compute grid with a single thread is launched for every query result
1184 * buffer. The thread (optionally) reads a previous summary buffer, then
1185 * accumulates data from the query result buffer, and writes the result either
1186 * to a summary buffer to be consumed by the next grid invocation or to the
1187 * user-supplied buffer.
1188 *
1189 * Data layout:
1190 *
1191 * CONST
1192 * 0.x = end_offset
1193 * 0.y = result_stride
1194 * 0.z = result_count
1195 * 0.w = bit field:
1196 * 1: read previously accumulated values
1197 * 2: write accumulated values for chaining
1198 * 4: write result available
1199 * 8: convert result to boolean (0/1)
1200 * 16: only read one dword and use that as result
1201 * 32: apply timestamp conversion
1202 * 64: store full 64 bits result
1203 * 128: store signed 32 bits result
1204 * 1.x = fence_offset
1205 * 1.y = pair_stride
1206 * 1.z = pair_count
1207 *
1208 * BUFFER[0] = query result buffer
1209 * BUFFER[1] = previous summary buffer
1210 * BUFFER[2] = next summary buffer or user-supplied buffer
1211 */
1212 static void r600_create_query_result_shader(struct r600_common_context *rctx)
1213 {
1214 /* TEMP[0].xy = accumulated result so far
1215 * TEMP[0].z = result not available
1216 *
1217 * TEMP[1].x = current result index
1218 * TEMP[1].y = current pair index
1219 */
1220 static const char text_tmpl[] =
1221 "COMP\n"
1222 "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
1223 "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
1224 "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
1225 "DCL BUFFER[0]\n"
1226 "DCL BUFFER[1]\n"
1227 "DCL BUFFER[2]\n"
1228 "DCL CONST[0..1]\n"
1229 "DCL TEMP[0..5]\n"
1230 "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
1231 "IMM[1] UINT32 {1, 2, 4, 8}\n"
1232 "IMM[2] UINT32 {16, 32, 64, 128}\n"
1233 "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
1234
1235 "AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
1236 "UIF TEMP[5]\n"
1237 /* Check result availability. */
1238 "LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
1239 "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
1240 "MOV TEMP[1], TEMP[0].zzzz\n"
1241 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1242
1243 /* Load result if available. */
1244 "UIF TEMP[1]\n"
1245 "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
1246 "ENDIF\n"
1247 "ELSE\n"
1248 /* Load previously accumulated result if requested. */
1249 "MOV TEMP[0], IMM[0].xxxx\n"
1250 "AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
1251 "UIF TEMP[4]\n"
1252 "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
1253 "ENDIF\n"
1254
1255 "MOV TEMP[1].x, IMM[0].xxxx\n"
1256 "BGNLOOP\n"
1257 /* Break if accumulated result so far is not available. */
1258 "UIF TEMP[0].zzzz\n"
1259 "BRK\n"
1260 "ENDIF\n"
1261
1262 /* Break if result_index >= result_count. */
1263 "USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
1264 "UIF TEMP[5]\n"
1265 "BRK\n"
1266 "ENDIF\n"
1267
1268 /* Load fence and check result availability */
1269 "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
1270 "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
1271 "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
1272 "NOT TEMP[0].z, TEMP[0].zzzz\n"
1273 "UIF TEMP[0].zzzz\n"
1274 "BRK\n"
1275 "ENDIF\n"
1276
1277 "MOV TEMP[1].y, IMM[0].xxxx\n"
1278 "BGNLOOP\n"
1279 /* Load start and end. */
1280 "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
1281 "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
1282 "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
1283
1284 "UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
1285 "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"
1286
1287 "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
1288 "U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"
1289
1290 /* Increment pair index */
1291 "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
1292 "USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
1293 "UIF TEMP[5]\n"
1294 "BRK\n"
1295 "ENDIF\n"
1296 "ENDLOOP\n"
1297
1298 /* Increment result index */
1299 "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
1300 "ENDLOOP\n"
1301 "ENDIF\n"
1302
1303 "AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
1304 "UIF TEMP[4]\n"
1305 /* Store accumulated data for chaining. */
1306 "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
1307 "ELSE\n"
1308 "AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
1309 "UIF TEMP[4]\n"
1310 /* Store result availability. */
1311 "NOT TEMP[0].z, TEMP[0]\n"
1312 "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
1313 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"
1314
1315 "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
1316 "UIF TEMP[4]\n"
1317 "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
1318 "ENDIF\n"
1319 "ELSE\n"
1320 /* Store result if it is available. */
1321 "NOT TEMP[4], TEMP[0].zzzz\n"
1322 "UIF TEMP[4]\n"
1323 /* Apply timestamp conversion */
1324 "AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
1325 "UIF TEMP[4]\n"
1326 "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
1327 "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
1328 "ENDIF\n"
1329
1330 /* Convert to boolean */
1331 "AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
1332 "UIF TEMP[4]\n"
1333 "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
1334 "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
1335 "MOV TEMP[0].y, IMM[0].xxxx\n"
1336 "ENDIF\n"
1337
1338 "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
1339 "UIF TEMP[4]\n"
1340 "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
1341 "ELSE\n"
1342 /* Clamping */
1343 "UIF TEMP[0].yyyy\n"
1344 "MOV TEMP[0].x, IMM[0].wwww\n"
1345 "ENDIF\n"
1346
1347 "AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
1348 "UIF TEMP[4]\n"
1349 "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
1350 "ENDIF\n"
1351
1352 "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
1353 "ENDIF\n"
1354 "ENDIF\n"
1355 "ENDIF\n"
1356 "ENDIF\n"
1357
1358 "END\n";
1359
1360 char text[sizeof(text_tmpl) + 32];
1361 struct tgsi_token tokens[1024];
1362 struct pipe_compute_state state = {};
1363
1364 /* Hard code the frequency into the shader so that the backend can
1365 * use the full range of optimizations for divide-by-constant.
1366 */
1367 snprintf(text, sizeof(text), text_tmpl,
1368 rctx->screen->info.clock_crystal_freq);
1369
1370 if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
1371 assert(false);
1372 return;
1373 }
1374
1375 state.ir_type = PIPE_SHADER_IR_TGSI;
1376 state.prog = tokens;
1377
1378 rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
1379 }
1380
1381 static void r600_restore_qbo_state(struct r600_common_context *rctx,
1382 struct r600_qbo_state *st)
1383 {
1384 rctx->b.bind_compute_state(&rctx->b, st->saved_compute);
1385
1386 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
1387 pipe_resource_reference(&st->saved_const0.buffer, NULL);
1388
1389 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
1390 for (unsigned i = 0; i < 3; ++i)
1391 pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
1392 }
1393
1394 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
1395 struct r600_query *rquery,
1396 bool wait,
1397 enum pipe_query_value_type result_type,
1398 int index,
1399 struct pipe_resource *resource,
1400 unsigned offset)
1401 {
1402 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1403 struct r600_query_buffer *qbuf;
1404 struct r600_query_buffer *qbuf_prev;
1405 struct pipe_resource *tmp_buffer = NULL;
1406 unsigned tmp_buffer_offset = 0;
1407 struct r600_qbo_state saved_state = {};
1408 struct pipe_grid_info grid = {};
1409 struct pipe_constant_buffer constant_buffer = {};
1410 struct pipe_shader_buffer ssbo[3];
1411 struct r600_hw_query_params params;
1412 struct {
1413 uint32_t end_offset;
1414 uint32_t result_stride;
1415 uint32_t result_count;
1416 uint32_t config;
1417 uint32_t fence_offset;
1418 uint32_t pair_stride;
1419 uint32_t pair_count;
1420 } consts;
1421
1422 if (!rctx->query_result_shader) {
1423 r600_create_query_result_shader(rctx);
1424 if (!rctx->query_result_shader)
1425 return;
1426 }
1427
1428 if (query->buffer.previous) {
1429 u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
1430 &tmp_buffer_offset, &tmp_buffer);
1431 if (!tmp_buffer)
1432 return;
1433 }
1434
1435 rctx->save_qbo_state(&rctx->b, &saved_state);
1436
1437 r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
1438 consts.end_offset = params.end_offset - params.start_offset;
1439 consts.fence_offset = params.fence_offset - params.start_offset;
1440 consts.result_stride = query->result_size;
1441 consts.pair_stride = params.pair_stride;
1442 consts.pair_count = params.pair_count;
1443
1444 constant_buffer.buffer_size = sizeof(consts);
1445 constant_buffer.user_buffer = &consts;
1446
1447 ssbo[1].buffer = tmp_buffer;
1448 ssbo[1].buffer_offset = tmp_buffer_offset;
1449 ssbo[1].buffer_size = 16;
1450
1451 ssbo[2] = ssbo[1];
1452
1453 rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);
1454
1455 grid.block[0] = 1;
1456 grid.block[1] = 1;
1457 grid.block[2] = 1;
1458 grid.grid[0] = 1;
1459 grid.grid[1] = 1;
1460 grid.grid[2] = 1;
1461
1462 consts.config = 0;
1463 if (index < 0)
1464 consts.config |= 4;
1465 if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1466 query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
1467 consts.config |= 8;
1468 else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
1469 query->b.type == PIPE_QUERY_TIME_ELAPSED)
1470 consts.config |= 32;
1471
1472 switch (result_type) {
1473 case PIPE_QUERY_TYPE_U64:
1474 case PIPE_QUERY_TYPE_I64:
1475 consts.config |= 64;
1476 break;
1477 case PIPE_QUERY_TYPE_I32:
1478 consts.config |= 128;
1479 break;
1480 case PIPE_QUERY_TYPE_U32:
1481 break;
1482 }
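/* As a worked example: reading only the availability (index == -1) of an
 * occlusion predicate as a 64-bit value ends up with
 * config = 4 | 8 | 64 = 76 (see the bit field documented above). */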
1483
1484 rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;
1485
1486 for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
1487 if (query->b.type != PIPE_QUERY_TIMESTAMP) {
1488 qbuf_prev = qbuf->previous;
1489 consts.result_count = qbuf->results_end / query->result_size;
1490 consts.config &= ~3;
1491 if (qbuf != &query->buffer)
1492 consts.config |= 1;
1493 if (qbuf->previous)
1494 consts.config |= 2;
1495 } else {
1496 /* Only read the last timestamp. */
1497 qbuf_prev = NULL;
1498 consts.result_count = 0;
1499 consts.config |= 16;
1500 params.start_offset += qbuf->results_end - query->result_size;
1501 }
1502
1503 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
1504
1505 ssbo[0].buffer = &qbuf->buf->b.b;
1506 ssbo[0].buffer_offset = params.start_offset;
1507 ssbo[0].buffer_size = qbuf->results_end - params.start_offset;
1508
1509 if (!qbuf->previous) {
1510 ssbo[2].buffer = resource;
1511 ssbo[2].buffer_offset = offset;
1512 ssbo[2].buffer_size = 8;
1513
1514 ((struct r600_resource *)resource)->TC_L2_dirty = true;
1515 }
1516
1517 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
1518
1519 if (wait && qbuf == &query->buffer) {
1520 uint64_t va;
1521
1522 /* Wait for result availability. Wait only for readiness
1523 * of the last entry, since the fence writes should be
1524 * serialized in the CP.
1525 */
1526 va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
1527 va += params.fence_offset;
1528
1529 r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
1530 }
1531
1532 rctx->b.launch_grid(&rctx->b, &grid);
1533 rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
1534 }
1535
1536 r600_restore_qbo_state(rctx, &saved_state);
1537 pipe_resource_reference(&tmp_buffer, NULL);
1538 }
1539
1540 static void r600_render_condition(struct pipe_context *ctx,
1541 struct pipe_query *query,
1542 boolean condition,
1543 uint mode)
1544 {
1545 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1546 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
1547 struct r600_query_buffer *qbuf;
1548 struct r600_atom *atom = &rctx->render_cond_atom;
1549
1550 rctx->render_cond = query;
1551 rctx->render_cond_invert = condition;
1552 rctx->render_cond_mode = mode;
1553
1554 /* Compute the size of SET_PREDICATION packets. */
1555 atom->num_dw = 0;
1556 if (query) {
1557 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
1558 atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
1559 }
1560
1561 rctx->set_atom_dirty(rctx, atom, query != NULL);
1562 }
1563
1564 void r600_suspend_queries(struct r600_common_context *ctx)
1565 {
1566 struct r600_query_hw *query;
1567
1568 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1569 r600_query_hw_emit_stop(ctx, query);
1570 }
1571 assert(ctx->num_cs_dw_queries_suspend == 0);
1572 }
1573
1574 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
1575 struct list_head *query_list)
1576 {
1577 struct r600_query_hw *query;
1578 unsigned num_dw = 0;
1579
1580 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1581 /* begin + end */
1582 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1583
1584 /* Workaround for the fact that
1585 * num_cs_dw_queries_suspend is incremented for every
1586 * resumed query, which raises the bar in need_cs_space for
1587 * queries about to be resumed.
1588 */
1589 num_dw += query->num_cs_dw_end;
1590 }
1591 /* primitives generated query */
1592 num_dw += ctx->streamout.enable_atom.num_dw;
1593 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
1594 num_dw += 13;
1595
1596 return num_dw;
1597 }
1598
1599 void r600_resume_queries(struct r600_common_context *ctx)
1600 {
1601 struct r600_query_hw *query;
1602 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
1603
1604 assert(ctx->num_cs_dw_queries_suspend == 0);
1605
1606 /* Check CS space here. Resuming must not be interrupted by flushes. */
1607 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
1608
1609 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1610 r600_query_hw_emit_start(ctx, query);
1611 }
1612 }
1613
1614 /* Get the mask of enabled render backends */
1615 void r600_query_init_backend_mask(struct r600_common_context *ctx)
1616 {
1617 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1618 struct r600_resource *buffer;
1619 uint32_t *results;
1620 unsigned num_backends = ctx->screen->info.num_render_backends;
1621 unsigned i, mask = 0;
1622
1623 /* if backend_map query is supported by the kernel */
1624 if (ctx->screen->info.r600_gb_backend_map_valid) {
1625 unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
1626 unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
1627 unsigned item_width, item_mask;
1628
1629 if (ctx->chip_class >= EVERGREEN) {
1630 item_width = 4;
1631 item_mask = 0x7;
1632 } else {
1633 item_width = 2;
1634 item_mask = 0x3;
1635 }
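/* backend_map packs one entry per tile pipe: 4 bits wide on EG+ (the low
 * 3 bits hold the backend index) and 2 bits wide on R600-class chips. */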
1636
1637 while (num_tile_pipes--) {
1638 i = backend_map & item_mask;
1639 mask |= (1<<i);
1640 backend_map >>= item_width;
1641 }
1642 if (mask != 0) {
1643 ctx->backend_mask = mask;
1644 return;
1645 }
1646 }
1647
1648 /* otherwise use the fallback path for older kernels */
1649
1650 /* create buffer for event data */
1651 buffer = (struct r600_resource*)
1652 pipe_buffer_create(ctx->b.screen, 0,
1653 PIPE_USAGE_STAGING, ctx->max_db*16);
1654 if (!buffer)
1655 goto err;
1656
1657 /* initialize buffer with zeroes */
1658 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
1659 if (results) {
1660 memset(results, 0, ctx->max_db * 4 * 4);
1661
1662 /* emit EVENT_WRITE for ZPASS_DONE */
1663 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1664 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
1665 radeon_emit(cs, buffer->gpu_address);
1666 radeon_emit(cs, buffer->gpu_address >> 32);
1667
1668 r600_emit_reloc(ctx, &ctx->gfx, buffer,
1669 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
1670
1671 /* analyze results */
1672 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
1673 if (results) {
1674 for(i = 0; i < ctx->max_db; i++) {
1675 /* at least the highest bit will be set if the backend is used */
1676 if (results[i*4 + 1])
1677 mask |= (1<<i);
1678 }
1679 }
1680 }
1681
1682 r600_resource_reference(&buffer, NULL);
1683
1684 if (mask != 0) {
1685 ctx->backend_mask = mask;
1686 return;
1687 }
1688
1689 err:
1690 /* fall back to the old method: set the lowest num_backends bits to 1 */
1691 ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
1692 return;
1693 }
1694
1695 #define XFULL(name_, query_type_, type_, result_type_, group_id_) \
1696 { \
1697 .name = name_, \
1698 .query_type = R600_QUERY_##query_type_, \
1699 .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
1700 .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
1701 .group_id = group_id_ \
1702 }
1703
1704 #define X(name_, query_type_, type_, result_type_) \
1705 XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
1706
1707 #define XG(group_, name_, query_type_, type_, result_type_) \
1708 XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
1709
1710 static struct pipe_driver_query_info r600_driver_query_list[] = {
1711 X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
1712 X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
1713 X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
1714 X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
1715 X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
1716 X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
1717 X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
1718 X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
1719 X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
1720 X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
1721 X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
1722 X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
1723 X("num-fb-cache-flushes", NUM_FB_CACHE_FLUSHES, UINT64, AVERAGE),
1724 X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
1725 X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
1726 X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
1727 X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
1728 X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
1729 X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
1730 X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
1731 X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
1732 X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
1733 X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
1734 X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
1735 X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
1736 X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
1737 X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
1738 X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
1739 X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
1740
1741 /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
1742 * which use them as a fallback path to detect the GPU type.
1743 *
1744 * Note: The names of these queries are significant for GPUPerfStudio
1745 * (and possibly their order as well). */
1746 XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
1747 XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
1748 XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
1749 XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
1750 XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),
1751
1752 /* The following queries must be at the end of the list because their
1753 * availability is adjusted dynamically based on the DRM version. */
1754 X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
1755 X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
1756 X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
1757 X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
1758 X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
1759 X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
1760 X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
1761 X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
1762 X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
1763 X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
1764 X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
1765 X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
1766 X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
1767 X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
1768
1769 X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
1770 X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
1771 X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
1772 };
1773
1774 #undef X
1775 #undef XG
1776 #undef XFULL
1777
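/* Trim the tail of r600_driver_query_list based on kernel support: radeon
 * DRM >= 2.42 exposes everything, amdgpu (DRM major 3) lacks the last 3
 * entries (temperature and clocks), and older radeon kernels lack the
 * last 17 (the GRBM busy counters plus temperature and clocks). */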
1778 static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
1779 {
1780 if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
1781 return ARRAY_SIZE(r600_driver_query_list);
1782 else if (rscreen->info.drm_major == 3)
1783 return ARRAY_SIZE(r600_driver_query_list) - 3;
1784 else
1785 return ARRAY_SIZE(r600_driver_query_list) - 17;
1786 }
1787
1788 static int r600_get_driver_query_info(struct pipe_screen *screen,
1789 unsigned index,
1790 struct pipe_driver_query_info *info)
1791 {
1792 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
1793 unsigned num_queries = r600_get_num_queries(rscreen);
1794
1795 if (!info) {
1796 unsigned num_perfcounters =
1797 r600_get_perfcounter_info(rscreen, 0, NULL);
1798
1799 return num_queries + num_perfcounters;
1800 }
1801
1802 if (index >= num_queries)
1803 return r600_get_perfcounter_info(rscreen, index - num_queries, info);
1804
1805 *info = r600_driver_query_list[index];
1806
1807 switch (info->query_type) {
1808 case R600_QUERY_REQUESTED_VRAM:
1809 case R600_QUERY_VRAM_USAGE:
1810 case R600_QUERY_MAPPED_VRAM:
1811 info->max_value.u64 = rscreen->info.vram_size;
1812 break;
1813 case R600_QUERY_REQUESTED_GTT:
1814 case R600_QUERY_GTT_USAGE:
1815 case R600_QUERY_MAPPED_GTT:
1816 info->max_value.u64 = rscreen->info.gart_size;
1817 break;
1818 case R600_QUERY_GPU_TEMPERATURE:
1819 info->max_value.u64 = 125;
1820 break;
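/* CPU-visible VRAM is the BAR-mapped slice of VRAM (often 256 MB), so
 * cap the HUD graph at vram_vis_size rather than the full VRAM size. */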
1821 case R600_QUERY_VRAM_VIS_USAGE:
1822 info->max_value.u64 = rscreen->info.vram_vis_size;
1823 break;
1824 }
1825
1826 if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
1827 info->group_id += rscreen->perfcounters->num_groups;
1828
1829 return 1;
1830 }
1831
1832 /* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
1833 * performance counter groups, so be careful when changing this and related
1834 * functions.
1835 */
1836 static int r600_get_driver_query_group_info(struct pipe_screen *screen,
1837 unsigned index,
1838 struct pipe_driver_query_group_info *info)
1839 {
1840 struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
1841 unsigned num_pc_groups = 0;
1842
1843 if (rscreen->perfcounters)
1844 num_pc_groups = rscreen->perfcounters->num_groups;
1845
1846 if (!info)
1847 return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
1848
1849 if (index < num_pc_groups)
1850 return r600_get_perfcounter_group_info(rscreen, index, info);
1851
1852 index -= num_pc_groups;
1853 if (index >= R600_NUM_SW_QUERY_GROUPS)
1854 return 0;
1855
1856 info->name = "GPIN";
1857 info->max_active_queries = 5;
1858 info->num_queries = 5;
1859 return 1;
1860 }
1861
1862 void r600_query_init(struct r600_common_context *rctx)
1863 {
1864 rctx->b.create_query = r600_create_query;
1865 rctx->b.create_batch_query = r600_create_batch_query;
1866 rctx->b.destroy_query = r600_destroy_query;
1867 rctx->b.begin_query = r600_begin_query;
1868 rctx->b.end_query = r600_end_query;
1869 rctx->b.get_query_result = r600_get_query_result;
1870 rctx->b.get_query_result_resource = r600_get_query_result_resource;
1871 rctx->render_cond_atom.emit = r600_emit_query_predication;
1872
1873 if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
1874 rctx->b.render_condition = r600_render_condition;
1875
1876 LIST_INITHEAD(&rctx->active_queries);
1877 }
1878
1879 void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
1880 {
1881 rscreen->b.get_driver_query_info = r600_get_driver_query_info;
1882 rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
1883 }