/* mesa.git: src/gallium/drivers/radeon/r600_query.c */
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#include "tgsi/tgsi_text.h"

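/* Locations of the sub-results within one hardware query result slot, as
 * consumed by the result-readback compute shader below: start_offset and
 * end_offset point at the 64-bit begin/end values, fence_offset at the
 * readiness dword, and pair_stride/pair_count describe per-DB {begin, end}
 * pairs (only occlusion queries have more than one pair; see
 * r600_get_hw_query_params).
 */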
struct r600_hw_query_params {
        unsigned start_offset;
        unsigned end_offset;
        unsigned fence_offset;
        unsigned pair_stride;
        unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
        struct r600_query b;

        uint64_t begin_result;
        uint64_t end_result;
        /* Fence for GPU_FINISHED. */
        struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_context *rctx,
                                  struct r600_query *rquery)
{
        struct pipe_screen *screen = rctx->b.screen;
        struct r600_query_sw *query = (struct r600_query_sw *)rquery;

        screen->fence_reference(screen, &query->fence, NULL);
        FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
        switch (type) {
        case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
        case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
        case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
        case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
        case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
        case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
        case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
        case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
        case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
        case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
        case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
        case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
        case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
        case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
        default: unreachable("query type does not correspond to winsys id");
        }
}

static bool r600_query_sw_begin(struct r600_common_context *rctx,
                                struct r600_query *rquery)
{
        struct r600_query_sw *query = (struct r600_query_sw *)rquery;

        switch (query->b.type) {
        case PIPE_QUERY_TIMESTAMP_DISJOINT:
        case PIPE_QUERY_GPU_FINISHED:
                break;
        case R600_QUERY_DRAW_CALLS:
                query->begin_result = rctx->num_draw_calls;
                break;
        case R600_QUERY_SPILL_DRAW_CALLS:
                query->begin_result = rctx->num_spill_draw_calls;
                break;
        case R600_QUERY_COMPUTE_CALLS:
                query->begin_result = rctx->num_compute_calls;
                break;
        case R600_QUERY_SPILL_COMPUTE_CALLS:
                query->begin_result = rctx->num_spill_compute_calls;
                break;
        case R600_QUERY_DMA_CALLS:
                query->begin_result = rctx->num_dma_calls;
                break;
        case R600_QUERY_CP_DMA_CALLS:
                query->begin_result = rctx->num_cp_dma_calls;
                break;
        case R600_QUERY_NUM_VS_FLUSHES:
                query->begin_result = rctx->num_vs_flushes;
                break;
        case R600_QUERY_NUM_PS_FLUSHES:
                query->begin_result = rctx->num_ps_flushes;
                break;
        case R600_QUERY_NUM_CS_FLUSHES:
                query->begin_result = rctx->num_cs_flushes;
                break;
        case R600_QUERY_NUM_FB_CACHE_FLUSHES:
                query->begin_result = rctx->num_fb_cache_flushes;
                break;
        case R600_QUERY_NUM_L2_INVALIDATES:
                query->begin_result = rctx->num_L2_invalidates;
                break;
        case R600_QUERY_NUM_L2_WRITEBACKS:
                query->begin_result = rctx->num_L2_writebacks;
                break;
        case R600_QUERY_REQUESTED_VRAM:
        case R600_QUERY_REQUESTED_GTT:
        case R600_QUERY_MAPPED_VRAM:
        case R600_QUERY_MAPPED_GTT:
        case R600_QUERY_VRAM_USAGE:
        case R600_QUERY_GTT_USAGE:
        case R600_QUERY_GPU_TEMPERATURE:
        case R600_QUERY_CURRENT_GPU_SCLK:
        case R600_QUERY_CURRENT_GPU_MCLK:
        case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
                query->begin_result = 0;
                break;
        case R600_QUERY_BUFFER_WAIT_TIME:
        case R600_QUERY_NUM_GFX_IBS:
        case R600_QUERY_NUM_SDMA_IBS:
        case R600_QUERY_NUM_BYTES_MOVED:
        case R600_QUERY_NUM_EVICTIONS: {
                enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
                query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
                break;
        }
        case R600_QUERY_GPU_LOAD:
        case R600_QUERY_GPU_SHADERS_BUSY:
                query->begin_result = r600_begin_counter(rctx->screen,
                                                         query->b.type);
                break;
        case R600_QUERY_NUM_COMPILATIONS:
                query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
                break;
        case R600_QUERY_NUM_SHADERS_CREATED:
                query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
                break;
        case R600_QUERY_NUM_SHADER_CACHE_HITS:
                query->begin_result =
                        p_atomic_read(&rctx->screen->num_shader_cache_hits);
                break;
        case R600_QUERY_GPIN_ASIC_ID:
        case R600_QUERY_GPIN_NUM_SIMD:
        case R600_QUERY_GPIN_NUM_RB:
        case R600_QUERY_GPIN_NUM_SPI:
        case R600_QUERY_GPIN_NUM_SE:
                break;
        default:
                unreachable("r600_query_sw_begin: bad query type");
        }

        return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
                              struct r600_query *rquery)
{
        struct r600_query_sw *query = (struct r600_query_sw *)rquery;

        switch (query->b.type) {
        case PIPE_QUERY_TIMESTAMP_DISJOINT:
                break;
        case PIPE_QUERY_GPU_FINISHED:
                rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
                break;
        case R600_QUERY_DRAW_CALLS:
                query->end_result = rctx->num_draw_calls;
                break;
        case R600_QUERY_SPILL_DRAW_CALLS:
                query->end_result = rctx->num_spill_draw_calls;
                break;
        case R600_QUERY_COMPUTE_CALLS:
                query->end_result = rctx->num_compute_calls;
                break;
        case R600_QUERY_SPILL_COMPUTE_CALLS:
                query->end_result = rctx->num_spill_compute_calls;
                break;
        case R600_QUERY_DMA_CALLS:
                query->end_result = rctx->num_dma_calls;
                break;
        case R600_QUERY_CP_DMA_CALLS:
                query->end_result = rctx->num_cp_dma_calls;
                break;
        case R600_QUERY_NUM_VS_FLUSHES:
                query->end_result = rctx->num_vs_flushes;
                break;
        case R600_QUERY_NUM_PS_FLUSHES:
                query->end_result = rctx->num_ps_flushes;
                break;
        case R600_QUERY_NUM_CS_FLUSHES:
                query->end_result = rctx->num_cs_flushes;
                break;
        case R600_QUERY_NUM_FB_CACHE_FLUSHES:
                query->end_result = rctx->num_fb_cache_flushes;
                break;
        case R600_QUERY_NUM_L2_INVALIDATES:
                query->end_result = rctx->num_L2_invalidates;
                break;
        case R600_QUERY_NUM_L2_WRITEBACKS:
                query->end_result = rctx->num_L2_writebacks;
                break;
        case R600_QUERY_REQUESTED_VRAM:
        case R600_QUERY_REQUESTED_GTT:
        case R600_QUERY_MAPPED_VRAM:
        case R600_QUERY_MAPPED_GTT:
        case R600_QUERY_VRAM_USAGE:
        case R600_QUERY_GTT_USAGE:
        case R600_QUERY_GPU_TEMPERATURE:
        case R600_QUERY_CURRENT_GPU_SCLK:
        case R600_QUERY_CURRENT_GPU_MCLK:
        case R600_QUERY_BUFFER_WAIT_TIME:
        case R600_QUERY_NUM_GFX_IBS:
        case R600_QUERY_NUM_SDMA_IBS:
        case R600_QUERY_NUM_BYTES_MOVED:
        case R600_QUERY_NUM_EVICTIONS: {
                enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
                query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
                break;
        }
        case R600_QUERY_GPU_LOAD:
        case R600_QUERY_GPU_SHADERS_BUSY:
                query->end_result = r600_end_counter(rctx->screen,
                                                     query->b.type,
                                                     query->begin_result);
                query->begin_result = 0;
                break;
        case R600_QUERY_NUM_COMPILATIONS:
                query->end_result = p_atomic_read(&rctx->screen->num_compilations);
                break;
        case R600_QUERY_NUM_SHADERS_CREATED:
                query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
                break;
        case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
                query->end_result = rctx->last_tex_ps_draw_ratio;
                break;
        case R600_QUERY_NUM_SHADER_CACHE_HITS:
                query->end_result =
                        p_atomic_read(&rctx->screen->num_shader_cache_hits);
                break;
        case R600_QUERY_GPIN_ASIC_ID:
        case R600_QUERY_GPIN_NUM_SIMD:
        case R600_QUERY_GPIN_NUM_RB:
        case R600_QUERY_GPIN_NUM_SPI:
        case R600_QUERY_GPIN_NUM_SE:
                break;
        default:
                unreachable("r600_query_sw_end: bad query type");
        }

        return true;
}

static bool r600_query_sw_get_result(struct r600_common_context *rctx,
                                     struct r600_query *rquery,
                                     bool wait,
                                     union pipe_query_result *result)
{
        struct r600_query_sw *query = (struct r600_query_sw *)rquery;

        switch (query->b.type) {
        case PIPE_QUERY_TIMESTAMP_DISJOINT:
                /* Convert from cycles per millisecond to cycles per second (Hz). */
                result->timestamp_disjoint.frequency =
                        (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
                result->timestamp_disjoint.disjoint = false;
                return true;
        case PIPE_QUERY_GPU_FINISHED: {
                struct pipe_screen *screen = rctx->b.screen;
                result->b = screen->fence_finish(screen, &rctx->b, query->fence,
                                                 wait ? PIPE_TIMEOUT_INFINITE : 0);
                return result->b;
        }

        case R600_QUERY_GPIN_ASIC_ID:
                result->u32 = 0;
                return true;
        case R600_QUERY_GPIN_NUM_SIMD:
                result->u32 = rctx->screen->info.num_good_compute_units;
                return true;
        case R600_QUERY_GPIN_NUM_RB:
                result->u32 = rctx->screen->info.num_render_backends;
                return true;
        case R600_QUERY_GPIN_NUM_SPI:
                result->u32 = 1; /* all supported chips have one SPI per SE */
                return true;
        case R600_QUERY_GPIN_NUM_SE:
                result->u32 = rctx->screen->info.max_se;
                return true;
        }

        result->u64 = query->end_result - query->begin_result;

        switch (query->b.type) {
        case R600_QUERY_BUFFER_WAIT_TIME:
        case R600_QUERY_GPU_TEMPERATURE:
                /* Nanoseconds to microseconds, millidegrees to degrees C. */
                result->u64 /= 1000;
                break;
        case R600_QUERY_CURRENT_GPU_SCLK:
        case R600_QUERY_CURRENT_GPU_MCLK:
                /* MHz to Hz. */
                result->u64 *= 1000000;
                break;
        }

        return true;
}

static struct r600_query_ops sw_query_ops = {
        .destroy = r600_query_sw_destroy,
        .begin = r600_query_sw_begin,
        .end = r600_query_sw_end,
        .get_result = r600_query_sw_get_result,
        .get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
                                               unsigned query_type)
{
        struct r600_query_sw *query;

        query = CALLOC_STRUCT(r600_query_sw);
        if (!query)
                return NULL;

        query->b.type = query_type;
        query->b.ops = &sw_query_ops;

        return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_context *rctx,
                           struct r600_query *rquery)
{
        struct r600_query_hw *query = (struct r600_query_hw *)rquery;
        struct r600_query_buffer *prev = query->buffer.previous;

        /* Release all query buffers. */
        while (prev) {
                struct r600_query_buffer *qbuf = prev;
                prev = prev->previous;
                r600_resource_reference(&qbuf->buf, NULL);
                FREE(qbuf);
        }

        r600_resource_reference(&query->buffer.buf, NULL);
        FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
                                                   struct r600_query_hw *query)
{
        unsigned buf_size = MAX2(query->result_size,
                                 ctx->screen->info.min_alloc_size);

        /* Queries are normally read by the CPU after being written by the
         * GPU, hence staging is probably a good usage pattern.
         */
        struct r600_resource *buf = (struct r600_resource *)
                pipe_buffer_create(ctx->b.screen, 0,
                                   PIPE_USAGE_STAGING, buf_size);
        if (!buf)
                return NULL;

        if (!query->ops->prepare_buffer(ctx, query, buf)) {
                r600_resource_reference(&buf, NULL);
                return NULL;
        }

        return buf;
}

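/* Occlusion query buffers hold one {begin, end} pair of 64-bit ZPASS
 * counters per DB (16 bytes per pair). The GPU sets bit 63 of every value
 * it actually writes; pre-seeding that bit here for backends that are not
 * in backend_mask lets r600_query_read_result treat the untouched pairs as
 * valid zero results.
 */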
static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
                                         struct r600_query_hw *query,
                                         struct r600_resource *buffer)
{
        /* Callers ensure that the buffer is currently unused by the GPU. */
        uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
                                                PIPE_TRANSFER_WRITE |
                                                PIPE_TRANSFER_UNSYNCHRONIZED);
        if (!results)
                return false;

        memset(results, 0, buffer->b.b.width0);

        if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
            query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
                unsigned num_results;
                unsigned i, j;

                /* Set top bits for unused backends. */
                num_results = buffer->b.b.width0 / query->result_size;
                for (j = 0; j < num_results; j++) {
                        for (i = 0; i < ctx->max_db; i++) {
                                if (!(ctx->backend_mask & (1 << i))) {
                                        results[(i * 4) + 1] = 0x80000000;
                                        results[(i * 4) + 3] = 0x80000000;
                                }
                        }
                        results += 4 * ctx->max_db;
                }
        }

        return true;
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
                                              struct r600_query *rquery,
                                              bool wait,
                                              enum pipe_query_value_type result_type,
                                              int index,
                                              struct pipe_resource *resource,
                                              unsigned offset);

static struct r600_query_ops query_hw_ops = {
        .destroy = r600_query_hw_destroy,
        .begin = r600_query_hw_begin,
        .end = r600_query_hw_end,
        .get_result = r600_query_hw_get_result,
        .get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
                                        struct r600_query_hw *query,
                                        struct r600_resource *buffer,
                                        uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
                                       struct r600_query_hw *query,
                                       struct r600_resource *buffer,
                                       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
                                     struct r600_query_hw *, void *buffer,
                                     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
                                       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
        .prepare_buffer = r600_query_hw_prepare_buffer,
        .emit_start = r600_query_hw_do_emit_start,
        .emit_stop = r600_query_hw_do_emit_stop,
        .clear_result = r600_query_hw_clear_result,
        .add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_context *rctx,
                        struct r600_query_hw *query)
{
        query->buffer.buf = r600_new_query_buffer(rctx, query);
        if (!query->buffer.buf)
                return false;

        return true;
}

static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
                                               unsigned query_type,
                                               unsigned index)
{
        struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
        if (!query)
                return NULL;

        query->b.type = query_type;
        query->b.ops = &query_hw_ops;
        query->ops = &query_hw_default_hw_ops;

        switch (query_type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                query->result_size = 16 * rctx->max_db;
                query->result_size += 16; /* for the fence + alignment */
                query->num_cs_dw_begin = 6;
                query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                query->result_size = 24;
                query->num_cs_dw_begin = 8;
                query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
                break;
        case PIPE_QUERY_TIMESTAMP:
                query->result_size = 16;
                query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
                query->flags = R600_QUERY_HW_FLAG_NO_START;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
                query->result_size = 32;
                query->num_cs_dw_begin = 6;
                query->num_cs_dw_end = 6;
                query->stream = index;
                break;
        case PIPE_QUERY_PIPELINE_STATISTICS:
                /* 11 values on EG, 8 on R600. */
                query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
                query->result_size += 8; /* for the fence + alignment */
                query->num_cs_dw_begin = 6;
                query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
                break;
        default:
                assert(0);
                FREE(query);
                return NULL;
        }

        if (!r600_query_hw_init(rctx, query)) {
                FREE(query);
                return NULL;
        }

        return (struct pipe_query *)query;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
                                              unsigned type, int diff)
{
        if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
            type == PIPE_QUERY_OCCLUSION_PREDICATE) {
                bool old_enable = rctx->num_occlusion_queries != 0;
                bool old_perfect_enable =
                        rctx->num_perfect_occlusion_queries != 0;
                bool enable, perfect_enable;

                rctx->num_occlusion_queries += diff;
                assert(rctx->num_occlusion_queries >= 0);

                if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
                        rctx->num_perfect_occlusion_queries += diff;
                        assert(rctx->num_perfect_occlusion_queries >= 0);
                }

                enable = rctx->num_occlusion_queries != 0;
                perfect_enable = rctx->num_perfect_occlusion_queries != 0;

                if (enable != old_enable || perfect_enable != old_perfect_enable) {
                        rctx->set_occlusion_query_state(&rctx->b, enable);
                }
        }
}

static unsigned event_type_for_stream(struct r600_query_hw *query)
{
        switch (query->stream) {
        default:
        case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
        case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
        case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
        case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
        }
}

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
                                        struct r600_query_hw *query,
                                        struct r600_resource *buffer,
                                        uint64_t va)
{
        struct radeon_winsys_cs *cs = ctx->gfx.cs;

        switch (query->b.type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32) & 0xFFFF);
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32) & 0xFFFF);
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
                                         0, 3, NULL, va, 0, 0);
                break;
        case PIPE_QUERY_PIPELINE_STATISTICS:
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32) & 0xFFFF);
                break;
        default:
                assert(0);
        }
        r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
                        RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
                                     struct r600_query_hw *query)
{
        uint64_t va;

        if (!query->buffer.buf)
                return; // previous buffer allocation failure

        r600_update_occlusion_query_state(ctx, query->b.type, 1);
        r600_update_prims_generated_query_state(ctx, query->b.type, 1);

        ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
                               true);

        /* Get a new query buffer if needed. */
        if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
                struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
                *qbuf = query->buffer;
                query->buffer.results_end = 0;
                query->buffer.previous = qbuf;
                query->buffer.buf = r600_new_query_buffer(ctx, query);
                if (!query->buffer.buf)
                        return;
        }

        /* emit begin query */
        va = query->buffer.buf->gpu_address + query->buffer.results_end;

        query->ops->emit_start(ctx, query, query->buffer.buf, va);

        ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
                                       struct r600_query_hw *query,
                                       struct r600_resource *buffer,
                                       uint64_t va)
{
        struct radeon_winsys_cs *cs = ctx->gfx.cs;
        uint64_t fence_va = 0;

        switch (query->b.type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                va += 8;
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32) & 0xFFFF);

                fence_va = va + ctx->max_db * 16 - 8;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                va += query->result_size / 2;
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32) & 0xFFFF);
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                va += 8;
                /* fall through */
        case PIPE_QUERY_TIMESTAMP:
                r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
                                         0, 3, NULL, va, 0, 0);
                fence_va = va + 8;
                break;
        case PIPE_QUERY_PIPELINE_STATISTICS: {
                unsigned sample_size = (query->result_size - 8) / 2;

                va += sample_size;
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32) & 0xFFFF);

                fence_va = va + sample_size;
                break;
        }
        default:
                assert(0);
        }
        r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
                        RADEON_PRIO_QUERY);

        if (fence_va)
                r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
                                         query->buffer.buf, fence_va, 0, 0x80000000);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
                                    struct r600_query_hw *query)
{
        uint64_t va;

        if (!query->buffer.buf)
                return; // previous buffer allocation failure

        /* Queries that have a begin already reserved CS space for the end
         * packet in begin_query; only NO_START queries check here. */
        if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
                ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
        }

        /* emit end query */
        va = query->buffer.buf->gpu_address + query->buffer.results_end;

        query->ops->emit_stop(ctx, query, query->buffer.buf, va);

        query->buffer.results_end += query->result_size;

        if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
                ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

        r600_update_occlusion_query_state(ctx, query->b.type, -1);
        r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
                                        struct r600_atom *atom)
{
        struct radeon_winsys_cs *cs = ctx->gfx.cs;
        struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
        struct r600_query_buffer *qbuf;
        uint32_t op;
        bool flag_wait;

        if (!query)
                return;

        flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
                    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

        switch (query->b.type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                op = PRED_OP(PREDICATION_OP_ZPASS);
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
                break;
        default:
                assert(0);
                return;
        }

        /* If true, invert the condition; see GL_ARB_conditional_render_inverted. */
        if (ctx->render_cond_invert)
                op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
        else
                op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

        op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

        /* emit predicate packets for all data blocks */
        for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
                unsigned results_base = 0;
                uint64_t va = qbuf->buf->gpu_address;

                while (results_base < qbuf->results_end) {
                        radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
                        radeon_emit(cs, va + results_base);
                        radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
                        r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
                                        RADEON_PRIO_QUERY);
                        results_base += query->result_size;

                        /* set CONTINUE bit for all packets except the first */
                        op |= PREDICATION_CONTINUE;
                }
        }
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;

        if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
            query_type == PIPE_QUERY_GPU_FINISHED ||
            query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
                return r600_query_sw_create(ctx, query_type);

        return r600_query_hw_create(rctx, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;

        rquery->ops->destroy(rctx, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
                                struct pipe_query *query)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;

        return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
                                 struct r600_query_hw *query)
{
        struct r600_query_buffer *prev = query->buffer.previous;

        /* Discard the old query buffers. */
        while (prev) {
                struct r600_query_buffer *qbuf = prev;
                prev = prev->previous;
                r600_resource_reference(&qbuf->buf, NULL);
                FREE(qbuf);
        }

        query->buffer.results_end = 0;
        query->buffer.previous = NULL;

        /* Obtain a new buffer if the current one can't be mapped without a stall. */
        if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
            !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
                r600_resource_reference(&query->buffer.buf, NULL);
                query->buffer.buf = r600_new_query_buffer(rctx, query);
        } else {
                if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
                        r600_resource_reference(&query->buffer.buf, NULL);
        }
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
                         struct r600_query *rquery)
{
        struct r600_query_hw *query = (struct r600_query_hw *)rquery;

        if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
                assert(0);
                return false;
        }

        if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
                r600_query_hw_reset_buffers(rctx, query);

        r600_query_hw_emit_start(rctx, query);
        if (!query->buffer.buf)
                return false;

        LIST_ADDTAIL(&query->list, &rctx->active_queries);
        return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;

        return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
                       struct r600_query *rquery)
{
        struct r600_query_hw *query = (struct r600_query_hw *)rquery;

        if (query->flags & R600_QUERY_HW_FLAG_NO_START)
                r600_query_hw_reset_buffers(rctx, query);

        r600_query_hw_emit_stop(rctx, query);

        if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
                LIST_DELINIT(&query->list);

        if (!query->buffer.buf)
                return false;

        return true;
}

static void r600_get_hw_query_params(struct r600_common_context *rctx,
                                     struct r600_query_hw *rquery, int index,
                                     struct r600_hw_query_params *params)
{
        params->pair_stride = 0;
        params->pair_count = 1;

        switch (rquery->b.type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                params->start_offset = 0;
                params->end_offset = 8;
                params->fence_offset = rctx->max_db * 16;
                params->pair_stride = 16;
                params->pair_count = rctx->max_db;
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                params->start_offset = 0;
                params->end_offset = 8;
                params->fence_offset = 16;
                break;
        case PIPE_QUERY_TIMESTAMP:
                params->start_offset = 0;
                params->end_offset = 0;
                params->fence_offset = 8;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                params->start_offset = 8;
                params->end_offset = 24;
                params->fence_offset = params->end_offset + 4;
                break;
        case PIPE_QUERY_PRIMITIVES_GENERATED:
                params->start_offset = 0;
                params->end_offset = 16;
                params->fence_offset = params->end_offset + 4;
                break;
        case PIPE_QUERY_SO_STATISTICS:
                params->start_offset = 8 - index * 8;
                params->end_offset = 24 - index * 8;
                params->fence_offset = params->end_offset + 4;
                break;
        case PIPE_QUERY_PIPELINE_STATISTICS:
        {
                /* Offsets apply to EG+ */
                static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
                params->start_offset = offsets[index];
                params->end_offset = 88 + offsets[index];
                params->fence_offset = 2 * 88;
                break;
        }
        default:
                unreachable("r600_get_hw_query_params unsupported");
        }
}
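
/* A worked example for EG+ pipeline statistics: the begin sample occupies
 * bytes 0..87 (11 counters * 8 bytes), the end sample bytes 88..175, and
 * the fence dword sits at byte 176 (= 2 * 88), which matches the
 * result_size of 11 * 16 + 8 set up in r600_query_hw_create.
 */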

static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
                                       bool test_status_bit)
{
        uint32_t *current_result = (uint32_t *)map;
        uint64_t start, end;

        start = (uint64_t)current_result[start_index] |
                (uint64_t)current_result[start_index + 1] << 32;
        end = (uint64_t)current_result[end_index] |
              (uint64_t)current_result[end_index + 1] << 32;

        if (!test_status_bit ||
            ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
                return end - start;
        }
        return 0;
}
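
/* Example: one occlusion {begin, end} counter pair is stored as four
 * consecutive dwords, so r600_query_read_result(buf, 0, 2, true) returns
 * end - begin, but only once bit 63 of both 64-bit values (set by the
 * ZPASS_DONE write, or pre-seeded for disabled backends in
 * r600_query_hw_prepare_buffer) indicates validity; otherwise 0.
 */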

static void r600_query_hw_add_result(struct r600_common_context *ctx,
                                     struct r600_query_hw *query,
                                     void *buffer,
                                     union pipe_query_result *result)
{
        switch (query->b.type) {
        case PIPE_QUERY_OCCLUSION_COUNTER: {
                for (unsigned i = 0; i < ctx->max_db; ++i) {
                        unsigned results_base = i * 16;
                        result->u64 +=
                                r600_query_read_result(buffer + results_base, 0, 2, true);
                }
                break;
        }
        case PIPE_QUERY_OCCLUSION_PREDICATE: {
                for (unsigned i = 0; i < ctx->max_db; ++i) {
                        unsigned results_base = i * 16;
                        result->b = result->b ||
                                r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
                }
                break;
        }
        case PIPE_QUERY_TIME_ELAPSED:
                result->u64 += r600_query_read_result(buffer, 0, 2, false);
                break;
        case PIPE_QUERY_TIMESTAMP:
                result->u64 = *(uint64_t *)buffer;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                /* SAMPLE_STREAMOUTSTATS stores this structure:
                 * {
                 *    u64 NumPrimitivesWritten;
                 *    u64 PrimitiveStorageNeeded;
                 * }
                 * We only need NumPrimitivesWritten here. */
                result->u64 += r600_query_read_result(buffer, 2, 6, true);
                break;
        case PIPE_QUERY_PRIMITIVES_GENERATED:
                /* Here we read PrimitiveStorageNeeded. */
                result->u64 += r600_query_read_result(buffer, 0, 4, true);
                break;
        case PIPE_QUERY_SO_STATISTICS:
                result->so_statistics.num_primitives_written +=
                        r600_query_read_result(buffer, 2, 6, true);
                result->so_statistics.primitives_storage_needed +=
                        r600_query_read_result(buffer, 0, 4, true);
                break;
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                result->b = result->b ||
                        r600_query_read_result(buffer, 2, 6, true) !=
                        r600_query_read_result(buffer, 0, 4, true);
                break;
        case PIPE_QUERY_PIPELINE_STATISTICS:
                if (ctx->chip_class >= EVERGREEN) {
                        result->pipeline_statistics.ps_invocations +=
                                r600_query_read_result(buffer, 0, 22, false);
                        result->pipeline_statistics.c_primitives +=
                                r600_query_read_result(buffer, 2, 24, false);
                        result->pipeline_statistics.c_invocations +=
                                r600_query_read_result(buffer, 4, 26, false);
                        result->pipeline_statistics.vs_invocations +=
                                r600_query_read_result(buffer, 6, 28, false);
                        result->pipeline_statistics.gs_invocations +=
                                r600_query_read_result(buffer, 8, 30, false);
                        result->pipeline_statistics.gs_primitives +=
                                r600_query_read_result(buffer, 10, 32, false);
                        result->pipeline_statistics.ia_primitives +=
                                r600_query_read_result(buffer, 12, 34, false);
                        result->pipeline_statistics.ia_vertices +=
                                r600_query_read_result(buffer, 14, 36, false);
                        result->pipeline_statistics.hs_invocations +=
                                r600_query_read_result(buffer, 16, 38, false);
                        result->pipeline_statistics.ds_invocations +=
                                r600_query_read_result(buffer, 18, 40, false);
                        result->pipeline_statistics.cs_invocations +=
                                r600_query_read_result(buffer, 20, 42, false);
                } else {
                        result->pipeline_statistics.ps_invocations +=
                                r600_query_read_result(buffer, 0, 16, false);
                        result->pipeline_statistics.c_primitives +=
                                r600_query_read_result(buffer, 2, 18, false);
                        result->pipeline_statistics.c_invocations +=
                                r600_query_read_result(buffer, 4, 20, false);
                        result->pipeline_statistics.vs_invocations +=
                                r600_query_read_result(buffer, 6, 22, false);
                        result->pipeline_statistics.gs_invocations +=
                                r600_query_read_result(buffer, 8, 24, false);
                        result->pipeline_statistics.gs_primitives +=
                                r600_query_read_result(buffer, 10, 26, false);
                        result->pipeline_statistics.ia_primitives +=
                                r600_query_read_result(buffer, 12, 28, false);
                        result->pipeline_statistics.ia_vertices +=
                                r600_query_read_result(buffer, 14, 30, false);
                }
#if 0 /* for testing */
                printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
                       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
                       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
                       result->pipeline_statistics.ia_vertices,
                       result->pipeline_statistics.ia_primitives,
                       result->pipeline_statistics.vs_invocations,
                       result->pipeline_statistics.hs_invocations,
                       result->pipeline_statistics.ds_invocations,
                       result->pipeline_statistics.gs_invocations,
                       result->pipeline_statistics.gs_primitives,
                       result->pipeline_statistics.c_invocations,
                       result->pipeline_statistics.c_primitives,
                       result->pipeline_statistics.ps_invocations,
                       result->pipeline_statistics.cs_invocations);
#endif
                break;
        default:
                assert(0);
        }
}

static boolean r600_get_query_result(struct pipe_context *ctx,
                                     struct pipe_query *query, boolean wait,
                                     union pipe_query_result *result)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;

        return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_get_query_result_resource(struct pipe_context *ctx,
                                           struct pipe_query *query,
                                           boolean wait,
                                           enum pipe_query_value_type result_type,
                                           int index,
                                           struct pipe_resource *resource,
                                           unsigned offset)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;

        rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
                                         resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
                                       union pipe_query_result *result)
{
        util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
                              struct r600_query *rquery,
                              bool wait, union pipe_query_result *result)
{
        struct r600_query_hw *query = (struct r600_query_hw *)rquery;
        struct r600_query_buffer *qbuf;

        query->ops->clear_result(query, result);

        for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
                unsigned results_base = 0;
                void *map;

                map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
                                                      PIPE_TRANSFER_READ |
                                                      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
                if (!map)
                        return false;

                while (results_base != qbuf->results_end) {
                        query->ops->add_result(rctx, query, map + results_base,
                                               result);
                        results_base += query->result_size;
                }
        }

        /* Convert the time to expected units. */
        if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
            rquery->type == PIPE_QUERY_TIMESTAMP) {
                result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
        }
        return true;
}
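
/* A worked example of the conversion above: clock_crystal_freq is in kHz,
 * so ticks * 1000000 / freq yields nanoseconds. With a 48 MHz reference
 * clock (clock_crystal_freq = 48000), 48 raw ticks become exactly 1000 ns.
 */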

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
        /* TEMP[0].xy = accumulated result so far
         * TEMP[0].z = result not available
         *
         * TEMP[1].x = current result index
         * TEMP[1].y = current pair index
         */
        static const char text_tmpl[] =
                "COMP\n"
                "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
                "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
                "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
                "DCL BUFFER[0]\n"
                "DCL BUFFER[1]\n"
                "DCL BUFFER[2]\n"
                "DCL CONST[0..1]\n"
                "DCL TEMP[0..5]\n"
                "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
                "IMM[1] UINT32 {1, 2, 4, 8}\n"
                "IMM[2] UINT32 {16, 32, 64, 128}\n"
                "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */

                "AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
                "UIF TEMP[5]\n"
                /* Check result availability. */
                "LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
                "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
                "MOV TEMP[1], TEMP[0].zzzz\n"
                "NOT TEMP[0].z, TEMP[0].zzzz\n"

                /* Load result if available. */
                "UIF TEMP[1]\n"
                "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
                "ENDIF\n"
                "ELSE\n"
                /* Load previously accumulated result if requested. */
                "MOV TEMP[0], IMM[0].xxxx\n"
                "AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
                "UIF TEMP[4]\n"
                "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
                "ENDIF\n"

                "MOV TEMP[1].x, IMM[0].xxxx\n"
                "BGNLOOP\n"
                /* Break if accumulated result so far is not available. */
                "UIF TEMP[0].zzzz\n"
                "BRK\n"
                "ENDIF\n"

                /* Break if result_index >= result_count. */
                "USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
                "UIF TEMP[5]\n"
                "BRK\n"
                "ENDIF\n"

                /* Load fence and check result availability */
                "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
                "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
                "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
                "NOT TEMP[0].z, TEMP[0].zzzz\n"
                "UIF TEMP[0].zzzz\n"
                "BRK\n"
                "ENDIF\n"

                "MOV TEMP[1].y, IMM[0].xxxx\n"
                "BGNLOOP\n"
                /* Load start and end. */
                "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
                "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
                "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

                "UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
                "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"

                "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
                "U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"

                /* Increment pair index */
                "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
                "USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
                "UIF TEMP[5]\n"
                "BRK\n"
                "ENDIF\n"
                "ENDLOOP\n"

                /* Increment result index */
                "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
                "ENDLOOP\n"
                "ENDIF\n"

                "AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
                "UIF TEMP[4]\n"
                /* Store accumulated data for chaining. */
                "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
                "ELSE\n"
                "AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
                "UIF TEMP[4]\n"
                /* Store result availability. */
                "NOT TEMP[0].z, TEMP[0]\n"
                "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
                "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

                "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
                "UIF TEMP[4]\n"
                "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
                "ENDIF\n"
                "ELSE\n"
                /* Store result if it is available. */
                "NOT TEMP[4], TEMP[0].zzzz\n"
                "UIF TEMP[4]\n"
                /* Apply timestamp conversion */
                "AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
                "UIF TEMP[4]\n"
                "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
                "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
                "ENDIF\n"

                /* Convert to boolean */
                "AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
                "UIF TEMP[4]\n"
                "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
                "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
                "MOV TEMP[0].y, IMM[0].xxxx\n"
                "ENDIF\n"

                "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
                "UIF TEMP[4]\n"
                "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
                "ELSE\n"
                /* Clamping */
                "UIF TEMP[0].yyyy\n"
                "MOV TEMP[0].x, IMM[0].wwww\n"
                "ENDIF\n"

                "AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
                "UIF TEMP[4]\n"
                "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
                "ENDIF\n"

                "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
                "ENDIF\n"
                "ENDIF\n"
                "ENDIF\n"
                "ENDIF\n"

                "END\n";

        char text[sizeof(text_tmpl) + 32];
        struct tgsi_token tokens[1024];
        struct pipe_compute_state state = {};

        /* Hard code the frequency into the shader so that the backend can
         * use the full range of optimizations for divide-by-constant.
         */
        snprintf(text, sizeof(text), text_tmpl,
                 rctx->screen->info.clock_crystal_freq);

        if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
                assert(false);
                return;
        }

        state.ir_type = PIPE_SHADER_IR_TGSI;
        state.prog = tokens;

        rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}

static void r600_restore_qbo_state(struct r600_common_context *rctx,
                                   struct r600_qbo_state *st)
{
        rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

        rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
        pipe_resource_reference(&st->saved_const0.buffer, NULL);

        rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
        for (unsigned i = 0; i < 3; ++i)
                pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
                                              struct r600_query *rquery,
                                              bool wait,
                                              enum pipe_query_value_type result_type,
                                              int index,
                                              struct pipe_resource *resource,
                                              unsigned offset)
{
        struct r600_query_hw *query = (struct r600_query_hw *)rquery;
        struct r600_query_buffer *qbuf;
        struct r600_query_buffer *qbuf_prev;
        struct pipe_resource *tmp_buffer = NULL;
        unsigned tmp_buffer_offset = 0;
        struct r600_qbo_state saved_state = {};
        struct pipe_grid_info grid = {};
        struct pipe_constant_buffer constant_buffer = {};
        struct pipe_shader_buffer ssbo[3];
        struct r600_hw_query_params params;
        struct {
                uint32_t end_offset;
                uint32_t result_stride;
                uint32_t result_count;
                uint32_t config;
                uint32_t fence_offset;
                uint32_t pair_stride;
                uint32_t pair_count;
        } consts;

        if (!rctx->query_result_shader) {
                r600_create_query_result_shader(rctx);
                if (!rctx->query_result_shader)
                        return;
        }

        if (query->buffer.previous) {
                u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
                                     &tmp_buffer_offset, &tmp_buffer);
                if (!tmp_buffer)
                        return;
        }

        rctx->save_qbo_state(&rctx->b, &saved_state);

        r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
        consts.end_offset = params.end_offset - params.start_offset;
        consts.fence_offset = params.fence_offset - params.start_offset;
        consts.result_stride = query->result_size;
        consts.pair_stride = params.pair_stride;
        consts.pair_count = params.pair_count;

        constant_buffer.buffer_size = sizeof(consts);
        constant_buffer.user_buffer = &consts;

        ssbo[1].buffer = tmp_buffer;
        ssbo[1].buffer_offset = tmp_buffer_offset;
        ssbo[1].buffer_size = 16;

        ssbo[2] = ssbo[1];

        rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

        grid.block[0] = 1;
        grid.block[1] = 1;
        grid.block[2] = 1;
        grid.grid[0] = 1;
        grid.grid[1] = 1;
        grid.grid[2] = 1;

        consts.config = 0;
        if (index < 0)
                consts.config |= 4;
        if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
            query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
                consts.config |= 8;
        else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
                 query->b.type == PIPE_QUERY_TIME_ELAPSED)
                consts.config |= 32;

        switch (result_type) {
        case PIPE_QUERY_TYPE_U64:
        case PIPE_QUERY_TYPE_I64:
                consts.config |= 64;
                break;
        case PIPE_QUERY_TYPE_I32:
                consts.config |= 128;
                break;
        case PIPE_QUERY_TYPE_U32:
                break;
        }

        rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

        for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
                if (query->b.type != PIPE_QUERY_TIMESTAMP) {
                        qbuf_prev = qbuf->previous;
                        consts.result_count = qbuf->results_end / query->result_size;
                        consts.config &= ~3;
                        if (qbuf != &query->buffer)
                                consts.config |= 1;
                        if (qbuf->previous)
                                consts.config |= 2;
                } else {
                        /* Only read the last timestamp. */
                        qbuf_prev = NULL;
                        consts.result_count = 0;
                        consts.config |= 16;
                        params.start_offset += qbuf->results_end - query->result_size;
                }

                rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

                ssbo[0].buffer = &qbuf->buf->b.b;
                ssbo[0].buffer_offset = params.start_offset;
                ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

                if (!qbuf->previous) {
                        ssbo[2].buffer = resource;
                        ssbo[2].buffer_offset = offset;
                        ssbo[2].buffer_size = 8;

                        ((struct r600_resource *)resource)->TC_L2_dirty = true;
                }

                rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

                if (wait && qbuf == &query->buffer) {
                        uint64_t va;

                        /* Wait for result availability. Wait only for readiness
                         * of the last entry, since the fence writes should be
                         * serialized in the CP.
                         */
                        va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
                        va += params.fence_offset;

                        r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
                }

                rctx->b.launch_grid(&rctx->b, &grid);
                rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
        }

        r600_restore_qbo_state(rctx, &saved_state);
        pipe_resource_reference(&tmp_buffer, NULL);
}

static void r600_render_condition(struct pipe_context *ctx,
                                  struct pipe_query *query,
                                  boolean condition,
                                  uint mode)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query_hw *rquery = (struct r600_query_hw *)query;
        struct r600_query_buffer *qbuf;
        struct r600_atom *atom = &rctx->render_cond_atom;

        rctx->render_cond = query;
        rctx->render_cond_invert = condition;
        rctx->render_cond_mode = mode;

        /* Compute the size of SET_PREDICATION packets. */
        atom->num_dw = 0;
        if (query) {
                for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
                        atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
        }

        rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
        struct r600_query_hw *query;

        LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
                r600_query_hw_emit_stop(ctx, query);
        }
        assert(ctx->num_cs_dw_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
                                                    struct list_head *query_list)
{
        struct r600_query_hw *query;
        unsigned num_dw = 0;

        LIST_FOR_EACH_ENTRY(query, query_list, list) {
                /* begin + end */
                num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

                /* Workaround for the fact that
                 * num_cs_dw_queries_suspend is incremented for every
                 * resumed query, which raises the bar in need_cs_space for
                 * queries about to be resumed.
                 */
                num_dw += query->num_cs_dw_end;
        }
        /* primitives generated query */
        num_dw += ctx->streamout.enable_atom.num_dw;
        /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
        num_dw += 13;

        return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
        struct r600_query_hw *query;
        unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

        assert(ctx->num_cs_dw_queries_suspend == 0);

        /* Check CS space here. Resuming must not be interrupted by flushes. */
        ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

        LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
                r600_query_hw_emit_start(ctx, query);
        }
}

/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->gfx.cs;
        struct r600_resource *buffer;
        uint32_t *results;
        unsigned num_backends = ctx->screen->info.num_render_backends;
        unsigned i, mask = 0;

        /* if backend_map query is supported by the kernel */
        if (ctx->screen->info.r600_gb_backend_map_valid) {
                unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
                unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
                unsigned item_width, item_mask;

                if (ctx->chip_class >= EVERGREEN) {
                        item_width = 4;
                        item_mask = 0x7;
                } else {
                        item_width = 2;
                        item_mask = 0x3;
                }

                while (num_tile_pipes--) {
                        i = backend_map & item_mask;
                        mask |= (1 << i);
                        backend_map >>= item_width;
                }
                if (mask != 0) {
                        ctx->backend_mask = mask;
                        return;
                }
        }

        /* otherwise backup path for older kernels */

        /* create buffer for event data */
        buffer = (struct r600_resource *)
                pipe_buffer_create(ctx->b.screen, 0,
                                   PIPE_USAGE_STAGING, ctx->max_db * 16);
        if (!buffer)
                goto err;

        /* initialize buffer with zeroes */
        results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
        if (results) {
                memset(results, 0, ctx->max_db * 4 * 4);

                /* emit EVENT_WRITE for ZPASS_DONE */
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
                radeon_emit(cs, buffer->gpu_address);
                radeon_emit(cs, buffer->gpu_address >> 32);

                r600_emit_reloc(ctx, &ctx->gfx, buffer,
                                RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

                /* analyze results */
                results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
                if (results) {
                        for (i = 0; i < ctx->max_db; i++) {
                                /* at least highest bit will be set if backend is used */
                                if (results[i * 4 + 1])
                                        mask |= (1 << i);
                        }
                }
        }

        r600_resource_reference(&buffer, NULL);

        if (mask != 0) {
                ctx->backend_mask = mask;
                return;
        }

err:
        /* fallback to old method - set num_backends lower bits to 1 */
        ctx->backend_mask = (~((uint32_t)0)) >> (32 - num_backends);
        return;
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
        { \
                .name = name_, \
                .query_type = R600_QUERY_##query_type_, \
                .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
                .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
                .group_id = group_id_ \
        }

#define X(name_, query_type_, type_, result_type_) \
        XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
        XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
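
/* For example, X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) expands to a
 * pipe_driver_query_info initializer for R600_QUERY_DRAW_CALLS with
 * group_id = ~(unsigned)0, i.e. a query that belongs to no group.
 */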

static struct pipe_driver_query_info r600_driver_query_list[] = {
        X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
        X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
        X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
        X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
        X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
        X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
        X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
        X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
        X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
        X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
        X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
        X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
        X("num-fb-cache-flushes", NUM_FB_CACHE_FLUSHES, UINT64, AVERAGE),
        X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
        X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
        X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
        X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
        X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
        X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
        X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
        X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
        X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
        X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
        X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
        X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
        X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
        X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

        /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
         * which use them as a fallback path to detect the GPU type.
         *
         * Note: The names of these queries are significant for GPUPerfStudio
         * (and possibly their order as well). */
        XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
        XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
        XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
        XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
        XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

        /* The following queries must be at the end of the list because their
         * availability is adjusted dynamically based on the DRM version. */
        X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
        X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
        X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
        X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
        X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
        /* The radeon kernel driver (DRM 2.x) exposes everything from 2.42 on;
         * DRM 3.x lacks the last three queries (temperature and clocks), and
         * older radeon also lacks GPU-load and GPU-shaders-busy. */
        if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
                return ARRAY_SIZE(r600_driver_query_list);
        else if (rscreen->info.drm_major == 3)
                return ARRAY_SIZE(r600_driver_query_list) - 3;
        else
                return ARRAY_SIZE(r600_driver_query_list) - 5;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
                                      unsigned index,
                                      struct pipe_driver_query_info *info)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
        unsigned num_queries = r600_get_num_queries(rscreen);

        if (!info) {
                unsigned num_perfcounters =
                        r600_get_perfcounter_info(rscreen, 0, NULL);

                return num_queries + num_perfcounters;
        }

        if (index >= num_queries)
                return r600_get_perfcounter_info(rscreen, index - num_queries, info);

        *info = r600_driver_query_list[index];

        switch (info->query_type) {
        case R600_QUERY_REQUESTED_VRAM:
        case R600_QUERY_VRAM_USAGE:
        case R600_QUERY_MAPPED_VRAM:
                info->max_value.u64 = rscreen->info.vram_size;
                break;
        case R600_QUERY_REQUESTED_GTT:
        case R600_QUERY_GTT_USAGE:
        case R600_QUERY_MAPPED_GTT:
                info->max_value.u64 = rscreen->info.gart_size;
                break;
        case R600_QUERY_GPU_TEMPERATURE:
                info->max_value.u64 = 125; /* degrees Celsius */
                break;
        }

        if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
                info->group_id += rscreen->perfcounters->num_groups;

        return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
                                            unsigned index,
                                            struct pipe_driver_query_group_info *info)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
        unsigned num_pc_groups = 0;

        if (rscreen->perfcounters)
                num_pc_groups = rscreen->perfcounters->num_groups;

        if (!info)
                return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

        if (index < num_pc_groups)
                return r600_get_perfcounter_group_info(rscreen, index, info);

        index -= num_pc_groups;
        if (index >= R600_NUM_SW_QUERY_GROUPS)
                return 0;

        info->name = "GPIN";
        info->max_active_queries = 5;
        info->num_queries = 5;
        return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
        rctx->b.create_query = r600_create_query;
        rctx->b.create_batch_query = r600_create_batch_query;
        rctx->b.destroy_query = r600_destroy_query;
        rctx->b.begin_query = r600_begin_query;
        rctx->b.end_query = r600_end_query;
        rctx->b.get_query_result = r600_get_query_result;
        rctx->b.get_query_result_resource = r600_get_query_result_resource;
        rctx->render_cond_atom.emit = r600_emit_query_predication;

        if (((struct r600_common_screen *)rctx->b.screen)->info.num_render_backends > 0)
                rctx->b.render_condition = r600_render_condition;

        LIST_INITHEAD(&rctx->active_queries);
}
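
/* For reference, a state tracker drives the hooks installed above roughly
 * like this (hypothetical usage sketch, not code from this driver):
 *
 *   struct pipe_query *q =
 *           pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER, 0);
 *   pipe->begin_query(pipe, q);
 *   ... draw calls ...
 *   pipe->end_query(pipe, q);
 *
 *   union pipe_query_result result;
 *   pipe->get_query_result(pipe, q, true, &result);
 *   // result.u64 = number of samples that passed the depth test
 *   pipe->destroy_query(pipe, q);
 */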

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
        rscreen->b.get_driver_query_info = r600_get_driver_query_info;
        rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}