gallium/radeon: don't re-create queries for DCC stat gathering
src/gallium/drivers/radeon/r600_query.c (mesa.git)
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "r600_query.h"
26 #include "r600_cs.h"
27 #include "util/u_memory.h"
28
29 /* Queries without buffer handling or suspend/resume. */
30 struct r600_query_sw {
31 struct r600_query b;
32
33 uint64_t begin_result;
34 uint64_t end_result;
35 /* Fence for GPU_FINISHED. */
36 struct pipe_fence_handle *fence;
37 };
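/* A software query just snapshots a counter in begin()/end(); get_result()
 * then reports end_result - begin_result, except for the "instant" queries
 * (GPIN_*, TIMESTAMP_DISJOINT, GPU_FINISHED) that are special-cased below. */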
38
39 static void r600_query_sw_destroy(struct r600_common_context *rctx,
40 struct r600_query *rquery)
41 {
42 struct pipe_screen *screen = rctx->b.screen;
43 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
44
45 screen->fence_reference(screen, &query->fence, NULL);
46 FREE(query);
47 }
48
49 static enum radeon_value_id winsys_id_from_type(unsigned type)
50 {
51 switch (type) {
52 case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
53 case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
54 case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
55 case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
56 case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
57 case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
58 case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
59 case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
60 case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
61 case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
62 default: unreachable("query type does not correspond to winsys id");
63 }
64 }
65
66 static bool r600_query_sw_begin(struct r600_common_context *rctx,
67 struct r600_query *rquery)
68 {
69 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
70
71 switch(query->b.type) {
72 case PIPE_QUERY_TIMESTAMP_DISJOINT:
73 case PIPE_QUERY_GPU_FINISHED:
74 break;
75 case R600_QUERY_DRAW_CALLS:
76 query->begin_result = rctx->num_draw_calls;
77 break;
78 case R600_QUERY_SPILL_DRAW_CALLS:
79 query->begin_result = rctx->num_spill_draw_calls;
80 break;
81 case R600_QUERY_COMPUTE_CALLS:
82 query->begin_result = rctx->num_compute_calls;
83 break;
84 case R600_QUERY_SPILL_COMPUTE_CALLS:
85 query->begin_result = rctx->num_spill_compute_calls;
86 break;
87 case R600_QUERY_DMA_CALLS:
88 query->begin_result = rctx->num_dma_calls;
89 break;
90 case R600_QUERY_REQUESTED_VRAM:
91 case R600_QUERY_REQUESTED_GTT:
92 case R600_QUERY_VRAM_USAGE:
93 case R600_QUERY_GTT_USAGE:
94 case R600_QUERY_GPU_TEMPERATURE:
95 case R600_QUERY_CURRENT_GPU_SCLK:
96 case R600_QUERY_CURRENT_GPU_MCLK:
97 case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
98 query->begin_result = 0;
99 break;
100 case R600_QUERY_BUFFER_WAIT_TIME:
101 case R600_QUERY_NUM_CS_FLUSHES:
102 case R600_QUERY_NUM_BYTES_MOVED: {
103 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
104 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
105 break;
106 }
107 case R600_QUERY_GPU_LOAD:
108 query->begin_result = r600_gpu_load_begin(rctx->screen);
109 break;
110 case R600_QUERY_NUM_COMPILATIONS:
111 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
112 break;
113 case R600_QUERY_NUM_SHADERS_CREATED:
114 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
115 break;
116 case R600_QUERY_GPIN_ASIC_ID:
117 case R600_QUERY_GPIN_NUM_SIMD:
118 case R600_QUERY_GPIN_NUM_RB:
119 case R600_QUERY_GPIN_NUM_SPI:
120 case R600_QUERY_GPIN_NUM_SE:
121 break;
122 default:
123 unreachable("r600_query_sw_begin: bad query type");
124 }
125
126 return true;
127 }
128
129 static bool r600_query_sw_end(struct r600_common_context *rctx,
130 struct r600_query *rquery)
131 {
132 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
133
134 switch(query->b.type) {
135 case PIPE_QUERY_TIMESTAMP_DISJOINT:
136 break;
137 case PIPE_QUERY_GPU_FINISHED:
138 rctx->b.flush(&rctx->b, &query->fence, 0);
139 break;
140 case R600_QUERY_DRAW_CALLS:
141 query->end_result = rctx->num_draw_calls;
142 break;
143 case R600_QUERY_SPILL_DRAW_CALLS:
144 query->end_result = rctx->num_spill_draw_calls;
145 break;
146 case R600_QUERY_COMPUTE_CALLS:
147 query->end_result = rctx->num_compute_calls;
148 break;
149 case R600_QUERY_SPILL_COMPUTE_CALLS:
150 query->end_result = rctx->num_spill_compute_calls;
151 break;
152 case R600_QUERY_DMA_CALLS:
153 query->end_result = rctx->num_dma_calls;
154 break;
155 case R600_QUERY_REQUESTED_VRAM:
156 case R600_QUERY_REQUESTED_GTT:
157 case R600_QUERY_VRAM_USAGE:
158 case R600_QUERY_GTT_USAGE:
159 case R600_QUERY_GPU_TEMPERATURE:
160 case R600_QUERY_CURRENT_GPU_SCLK:
161 case R600_QUERY_CURRENT_GPU_MCLK:
162 case R600_QUERY_BUFFER_WAIT_TIME:
163 case R600_QUERY_NUM_CS_FLUSHES:
164 case R600_QUERY_NUM_BYTES_MOVED: {
165 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
166 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
167 break;
168 }
169 case R600_QUERY_GPU_LOAD:
170 query->end_result = r600_gpu_load_end(rctx->screen,
171 query->begin_result);
172 query->begin_result = 0;
173 break;
174 case R600_QUERY_NUM_COMPILATIONS:
175 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
176 break;
177 case R600_QUERY_NUM_SHADERS_CREATED:
178 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
179 break;
180 case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
181 query->end_result = rctx->last_tex_ps_draw_ratio;
182 break;
183 case R600_QUERY_GPIN_ASIC_ID:
184 case R600_QUERY_GPIN_NUM_SIMD:
185 case R600_QUERY_GPIN_NUM_RB:
186 case R600_QUERY_GPIN_NUM_SPI:
187 case R600_QUERY_GPIN_NUM_SE:
188 break;
189 default:
190 unreachable("r600_query_sw_end: bad query type");
191 }
192
193 return true;
194 }
195
196 static bool r600_query_sw_get_result(struct r600_common_context *rctx,
197 struct r600_query *rquery,
198 bool wait,
199 union pipe_query_result *result)
200 {
201 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
202
203 switch (query->b.type) {
204 case PIPE_QUERY_TIMESTAMP_DISJOINT:
205 /* Convert from cycles per millisecond to cycles per second (Hz). */
206 result->timestamp_disjoint.frequency =
207 (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
208 result->timestamp_disjoint.disjoint = false;
209 return true;
210 case PIPE_QUERY_GPU_FINISHED: {
211 struct pipe_screen *screen = rctx->b.screen;
212 result->b = screen->fence_finish(screen, query->fence,
213 wait ? PIPE_TIMEOUT_INFINITE : 0);
214 return result->b;
215 }
216
217 case R600_QUERY_GPIN_ASIC_ID:
218 result->u32 = 0;
219 return true;
220 case R600_QUERY_GPIN_NUM_SIMD:
221 result->u32 = rctx->screen->info.num_good_compute_units;
222 return true;
223 case R600_QUERY_GPIN_NUM_RB:
224 result->u32 = rctx->screen->info.num_render_backends;
225 return true;
226 case R600_QUERY_GPIN_NUM_SPI:
227 result->u32 = 1; /* all supported chips have one SPI per SE */
228 return true;
229 case R600_QUERY_GPIN_NUM_SE:
230 result->u32 = rctx->screen->info.max_se;
231 return true;
232 }
233
234 result->u64 = query->end_result - query->begin_result;
235
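/* Unit conversion for the remaining counters. Assumption (the winsys side is
 * not shown in this file): buffer wait time is reported in ns and the GPU
 * temperature in millidegrees, hence the division by 1000, while sclk/mclk
 * come back in MHz and are scaled up to Hz. */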
236 switch (query->b.type) {
237 case R600_QUERY_BUFFER_WAIT_TIME:
238 case R600_QUERY_GPU_TEMPERATURE:
239 result->u64 /= 1000;
240 break;
241 case R600_QUERY_CURRENT_GPU_SCLK:
242 case R600_QUERY_CURRENT_GPU_MCLK:
243 result->u64 *= 1000000;
244 break;
245 }
246
247 return true;
248 }
249
250 static struct r600_query_ops sw_query_ops = {
251 .destroy = r600_query_sw_destroy,
252 .begin = r600_query_sw_begin,
253 .end = r600_query_sw_end,
254 .get_result = r600_query_sw_get_result
255 };
256
257 static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
258 unsigned query_type)
259 {
260 struct r600_query_sw *query;
261
262 query = CALLOC_STRUCT(r600_query_sw);
263 if (!query)
264 return NULL;
265
266 query->b.type = query_type;
267 query->b.ops = &sw_query_ops;
268
269 return (struct pipe_query *)query;
270 }
271
272 void r600_query_hw_destroy(struct r600_common_context *rctx,
273 struct r600_query *rquery)
274 {
275 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
276 struct r600_query_buffer *prev = query->buffer.previous;
277
278 /* Release all query buffers. */
279 while (prev) {
280 struct r600_query_buffer *qbuf = prev;
281 prev = prev->previous;
282 r600_resource_reference(&qbuf->buf, NULL);
283 FREE(qbuf);
284 }
285
286 r600_resource_reference(&query->buffer.buf, NULL);
287 FREE(rquery);
288 }
289
290 static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
291 struct r600_query_hw *query)
292 {
293 unsigned buf_size = MAX2(query->result_size,
294 ctx->screen->info.gart_page_size);
295
296 /* Queries are normally read by the CPU after
297 * being written by the GPU, hence staging is probably a good
298 * usage pattern.
299 */
300 struct r600_resource *buf = (struct r600_resource*)
301 pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
302 PIPE_USAGE_STAGING, buf_size);
303 if (!buf)
304 return NULL;
305
306 if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
307 if (!query->ops->prepare_buffer(ctx, query, buf)) {
308 r600_resource_reference(&buf, NULL);
309 return NULL;
310 }
311 }
312
313 return buf;
314 }
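/* Query buffers are chained: when the current one fills up,
 * r600_query_hw_emit_start() pushes it onto query->buffer.previous and
 * allocates a fresh head buffer, so one long-running query can accumulate
 * results across several buffers. */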
315
316 static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
317 struct r600_query_hw *query,
318 struct r600_resource *buffer)
319 {
320 /* Callers ensure that the buffer is currently unused by the GPU. */
321 uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
322 PIPE_TRANSFER_WRITE |
323 PIPE_TRANSFER_UNSYNCHRONIZED);
324 if (!results)
325 return false;
326
327 memset(results, 0, buffer->b.b.width0);
328
329 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
330 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
331 unsigned num_results;
332 unsigned i, j;
333
334 /* Set top bits for unused backends. */
335 num_results = buffer->b.b.width0 / (16 * ctx->max_db);
336 for (j = 0; j < num_results; j++) {
337 for (i = 0; i < ctx->max_db; i++) {
338 if (!(ctx->backend_mask & (1<<i))) {
339 results[(i * 4)+1] = 0x80000000;
340 results[(i * 4)+3] = 0x80000000;
341 }
342 }
343 results += 4 * ctx->max_db;
344 }
345 }
346
347 return true;
348 }
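/* Each ZPASS slot holds a (begin, end) pair of 64-bit counters per DB, i.e.
 * 16 bytes per DB. Pre-setting bit 63 of both halves for disabled backends
 * makes the validity test in r600_query_read_result() succeed with a zero
 * contribution, so those backends are treated as already written. */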
349
350 static struct r600_query_ops query_hw_ops = {
351 .destroy = r600_query_hw_destroy,
352 .begin = r600_query_hw_begin,
353 .end = r600_query_hw_end,
354 .get_result = r600_query_hw_get_result,
355 };
356
357 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
358 struct r600_query_hw *query,
359 struct r600_resource *buffer,
360 uint64_t va);
361 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
362 struct r600_query_hw *query,
363 struct r600_resource *buffer,
364 uint64_t va);
365 static void r600_query_hw_add_result(struct r600_common_context *ctx,
366 struct r600_query_hw *, void *buffer,
367 union pipe_query_result *result);
368 static void r600_query_hw_clear_result(struct r600_query_hw *,
369 union pipe_query_result *);
370
371 static struct r600_query_hw_ops query_hw_default_hw_ops = {
372 .prepare_buffer = r600_query_hw_prepare_buffer,
373 .emit_start = r600_query_hw_do_emit_start,
374 .emit_stop = r600_query_hw_do_emit_stop,
375 .clear_result = r600_query_hw_clear_result,
376 .add_result = r600_query_hw_add_result,
377 };
378
379 bool r600_query_hw_init(struct r600_common_context *rctx,
380 struct r600_query_hw *query)
381 {
382 query->buffer.buf = r600_new_query_buffer(rctx, query);
383 if (!query->buffer.buf)
384 return false;
385
386 return true;
387 }
388
389 static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
390 unsigned query_type,
391 unsigned index)
392 {
393 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
394 if (!query)
395 return NULL;
396
397 query->b.type = query_type;
398 query->b.ops = &query_hw_ops;
399 query->ops = &query_hw_default_hw_ops;
400
401 switch (query_type) {
402 case PIPE_QUERY_OCCLUSION_COUNTER:
403 case PIPE_QUERY_OCCLUSION_PREDICATE:
404 query->result_size = 16 * rctx->max_db;
405 query->num_cs_dw_begin = 6;
406 query->num_cs_dw_end = 6;
407 query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
408 break;
409 case PIPE_QUERY_TIME_ELAPSED:
410 query->result_size = 16;
411 query->num_cs_dw_begin = 8;
412 query->num_cs_dw_end = 8;
413 break;
414 case PIPE_QUERY_TIMESTAMP:
415 query->result_size = 8;
416 query->num_cs_dw_end = 8;
417 query->flags = R600_QUERY_HW_FLAG_NO_START;
418 break;
419 case PIPE_QUERY_PRIMITIVES_EMITTED:
420 case PIPE_QUERY_PRIMITIVES_GENERATED:
421 case PIPE_QUERY_SO_STATISTICS:
422 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
423 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
424 query->result_size = 32;
425 query->num_cs_dw_begin = 6;
426 query->num_cs_dw_end = 6;
427 query->stream = index;
428 query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
429 break;
430 case PIPE_QUERY_PIPELINE_STATISTICS:
431 /* 11 values on EG, 8 on R600. */
432 query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
433 query->num_cs_dw_begin = 6;
434 query->num_cs_dw_end = 6;
435 break;
436 default:
437 assert(0);
438 FREE(query);
439 return NULL;
440 }
441
442 if (!r600_query_hw_init(rctx, query)) {
443 FREE(query);
444 return NULL;
445 }
446
447 return (struct pipe_query *)query;
448 }
449
450 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
451 unsigned type, int diff)
452 {
453 if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
454 type == PIPE_QUERY_OCCLUSION_PREDICATE) {
455 bool old_enable = rctx->num_occlusion_queries != 0;
456 bool old_perfect_enable =
457 rctx->num_perfect_occlusion_queries != 0;
458 bool enable, perfect_enable;
459
460 rctx->num_occlusion_queries += diff;
461 assert(rctx->num_occlusion_queries >= 0);
462
463 if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
464 rctx->num_perfect_occlusion_queries += diff;
465 assert(rctx->num_perfect_occlusion_queries >= 0);
466 }
467
468 enable = rctx->num_occlusion_queries != 0;
469 perfect_enable = rctx->num_perfect_occlusion_queries != 0;
470
471 if (enable != old_enable || perfect_enable != old_perfect_enable) {
472 rctx->set_occlusion_query_state(&rctx->b, enable);
473 }
474 }
475 }
476
477 static unsigned event_type_for_stream(struct r600_query_hw *query)
478 {
479 switch (query->stream) {
480 default:
481 case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
482 case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
483 case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
484 case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
485 }
486 }
487
488 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
489 struct r600_query_hw *query,
490 struct r600_resource *buffer,
491 uint64_t va)
492 {
493 struct radeon_winsys_cs *cs = ctx->gfx.cs;
494
495 switch (query->b.type) {
496 case PIPE_QUERY_OCCLUSION_COUNTER:
497 case PIPE_QUERY_OCCLUSION_PREDICATE:
498 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
499 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
500 radeon_emit(cs, va);
501 radeon_emit(cs, (va >> 32) & 0xFFFF);
502 break;
503 case PIPE_QUERY_PRIMITIVES_EMITTED:
504 case PIPE_QUERY_PRIMITIVES_GENERATED:
505 case PIPE_QUERY_SO_STATISTICS:
506 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
507 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
508 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
509 radeon_emit(cs, va);
510 radeon_emit(cs, (va >> 32) & 0xFFFF);
511 break;
512 case PIPE_QUERY_TIME_ELAPSED:
513 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
514 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
515 radeon_emit(cs, va);
516 radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
517 radeon_emit(cs, 0);
518 radeon_emit(cs, 0);
519 break;
520 case PIPE_QUERY_PIPELINE_STATISTICS:
521 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
522 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
523 radeon_emit(cs, va);
524 radeon_emit(cs, (va >> 32) & 0xFFFF);
525 break;
526 default:
527 assert(0);
528 }
529 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
530 RADEON_PRIO_QUERY);
531 }
532
533 static void r600_query_hw_emit_start(struct r600_common_context *ctx,
534 struct r600_query_hw *query)
535 {
536 uint64_t va;
537
538 if (!query->buffer.buf)
539 return; // previous buffer allocation failure
540
541 r600_update_occlusion_query_state(ctx, query->b.type, 1);
542 r600_update_prims_generated_query_state(ctx, query->b.type, 1);
543
544 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
545 true);
546
547 /* Get a new query buffer if needed. */
548 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
549 struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
550 *qbuf = query->buffer;
551 query->buffer.results_end = 0;
552 query->buffer.previous = qbuf;
553 query->buffer.buf = r600_new_query_buffer(ctx, query);
554 if (!query->buffer.buf)
555 return;
556 }
557
558 /* emit begin query */
559 va = query->buffer.buf->gpu_address + query->buffer.results_end;
560
561 query->ops->emit_start(ctx, query, query->buffer.buf, va);
562
563 ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
564 }
565
566 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
567 struct r600_query_hw *query,
568 struct r600_resource *buffer,
569 uint64_t va)
570 {
571 struct radeon_winsys_cs *cs = ctx->gfx.cs;
572
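/* End-of-query data lands in the second half of the result slot: most query
 * types advance va by result_size/2, while ZPASS results advance it by 8 so
 * the end counter sits next to its begin counter inside each per-DB 16-byte
 * pair. */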
573 switch (query->b.type) {
574 case PIPE_QUERY_OCCLUSION_COUNTER:
575 case PIPE_QUERY_OCCLUSION_PREDICATE:
576 va += 8;
577 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
578 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
579 radeon_emit(cs, va);
580 radeon_emit(cs, (va >> 32) & 0xFFFF);
581 break;
582 case PIPE_QUERY_PRIMITIVES_EMITTED:
583 case PIPE_QUERY_PRIMITIVES_GENERATED:
584 case PIPE_QUERY_SO_STATISTICS:
585 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
586 va += query->result_size/2;
587 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
588 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
589 radeon_emit(cs, va);
590 radeon_emit(cs, (va >> 32) & 0xFFFF);
591 break;
592 case PIPE_QUERY_TIME_ELAPSED:
593 va += query->result_size/2;
594 /* fall through */
595 case PIPE_QUERY_TIMESTAMP:
596 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
597 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
598 radeon_emit(cs, va);
599 radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
600 radeon_emit(cs, 0);
601 radeon_emit(cs, 0);
602 break;
603 case PIPE_QUERY_PIPELINE_STATISTICS:
604 va += query->result_size/2;
605 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
606 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
607 radeon_emit(cs, va);
608 radeon_emit(cs, (va >> 32) & 0xFFFF);
609 break;
610 default:
611 assert(0);
612 }
613 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
614 RADEON_PRIO_QUERY);
615 }
616
617 static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
618 struct r600_query_hw *query)
619 {
620 uint64_t va;
621
622 if (!query->buffer.buf)
623 return; // previous buffer allocation failure
624
625 /* Queries that have a begin already reserved CS space for the end packet in begin_query. */
626 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
627 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
628 }
629
630 /* emit end query */
631 va = query->buffer.buf->gpu_address + query->buffer.results_end;
632
633 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
634
635 query->buffer.results_end += query->result_size;
636
637 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
638 ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
639
640 r600_update_occlusion_query_state(ctx, query->b.type, -1);
641 r600_update_prims_generated_query_state(ctx, query->b.type, -1);
642 }
643
644 static void r600_emit_query_predication(struct r600_common_context *ctx,
645 struct r600_atom *atom)
646 {
647 struct radeon_winsys_cs *cs = ctx->gfx.cs;
648 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
649 struct r600_query_buffer *qbuf;
650 uint32_t op;
651 bool flag_wait;
652
653 if (!query)
654 return;
655
656 flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
657 ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
658
659 switch (query->b.type) {
660 case PIPE_QUERY_OCCLUSION_COUNTER:
661 case PIPE_QUERY_OCCLUSION_PREDICATE:
662 op = PRED_OP(PREDICATION_OP_ZPASS);
663 break;
664 case PIPE_QUERY_PRIMITIVES_EMITTED:
665 case PIPE_QUERY_PRIMITIVES_GENERATED:
666 case PIPE_QUERY_SO_STATISTICS:
667 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
668 op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
669 break;
670 default:
671 assert(0);
672 return;
673 }
674
675 /* if true then invert, see GL_ARB_conditional_render_inverted */
676 if (ctx->render_cond_invert)
677 op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
678 else
679 op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */
680
681 op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
682
683 /* emit predicate packets for all data blocks */
684 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
685 unsigned results_base = 0;
686 uint64_t va = qbuf->buf->gpu_address;
687
688 while (results_base < qbuf->results_end) {
689 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
690 radeon_emit(cs, va + results_base);
691 radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
692 r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
693 RADEON_PRIO_QUERY);
694 results_base += query->result_size;
695
696 /* set CONTINUE bit for all packets except the first */
697 op |= PREDICATION_CONTINUE;
698 }
699 }
700 }
701
702 static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
703 {
704 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
705
706 if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
707 query_type == PIPE_QUERY_GPU_FINISHED ||
708 query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
709 return r600_query_sw_create(ctx, query_type);
710
711 return r600_query_hw_create(rctx, query_type, index);
712 }
713
714 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
715 {
716 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
717 struct r600_query *rquery = (struct r600_query *)query;
718
719 rquery->ops->destroy(rctx, rquery);
720 }
721
722 static boolean r600_begin_query(struct pipe_context *ctx,
723 struct pipe_query *query)
724 {
725 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
726 struct r600_query *rquery = (struct r600_query *)query;
727
728 return rquery->ops->begin(rctx, rquery);
729 }
730
731 void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
732 struct r600_query_hw *query)
733 {
734 struct r600_query_buffer *prev = query->buffer.previous;
735
736 /* Discard the old query buffers. */
737 while (prev) {
738 struct r600_query_buffer *qbuf = prev;
739 prev = prev->previous;
740 r600_resource_reference(&qbuf->buf, NULL);
741 FREE(qbuf);
742 }
743
744 query->buffer.results_end = 0;
745 query->buffer.previous = NULL;
746
747 if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
748 /* Obtain a new buffer if the current one can't be mapped without a stall. */
749 if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
750 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
751 r600_resource_reference(&query->buffer.buf, NULL);
752 query->buffer.buf = r600_new_query_buffer(rctx, query);
753 } else {
754 if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
755 r600_resource_reference(&query->buffer.buf, NULL);
756 }
757 }
758 }
759
760 bool r600_query_hw_begin(struct r600_common_context *rctx,
761 struct r600_query *rquery)
762 {
763 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
764
765 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
766 assert(0);
767 return false;
768 }
769
770 if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
771 r600_query_hw_reset_buffers(rctx, query);
772
773 r600_query_hw_emit_start(rctx, query);
774 if (!query->buffer.buf)
775 return false;
776
777 LIST_ADDTAIL(&query->list, &rctx->active_queries);
778 return true;
779 }
780
781 static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
782 {
783 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
784 struct r600_query *rquery = (struct r600_query *)query;
785
786 return rquery->ops->end(rctx, rquery);
787 }
788
789 bool r600_query_hw_end(struct r600_common_context *rctx,
790 struct r600_query *rquery)
791 {
792 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
793
794 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
795 r600_query_hw_reset_buffers(rctx, query);
796
797 r600_query_hw_emit_stop(rctx, query);
798
799 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
800 LIST_DELINIT(&query->list);
801
802 if (!query->buffer.buf)
803 return false;
804
805 return true;
806 }
807
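/* Read one begin/end snapshot pair from a mapped result slot; start_index and
 * end_index are 32-bit word offsets. With test_status_bit, bit 63 of both
 * snapshots (the GPU's "result written" marker for ZPASS data) must be set
 * before the difference is trusted, otherwise 0 is returned. */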
808 static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
809 bool test_status_bit)
810 {
811 uint32_t *current_result = (uint32_t*)map;
812 uint64_t start, end;
813
814 start = (uint64_t)current_result[start_index] |
815 (uint64_t)current_result[start_index+1] << 32;
816 end = (uint64_t)current_result[end_index] |
817 (uint64_t)current_result[end_index+1] << 32;
818
819 if (!test_status_bit ||
820 ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
821 return end - start;
822 }
823 return 0;
824 }
825
826 static void r600_query_hw_add_result(struct r600_common_context *ctx,
827 struct r600_query_hw *query,
828 void *buffer,
829 union pipe_query_result *result)
830 {
831 switch (query->b.type) {
832 case PIPE_QUERY_OCCLUSION_COUNTER: {
833 unsigned results_base = 0;
834 while (results_base != query->result_size) {
835 result->u64 +=
836 r600_query_read_result(buffer + results_base, 0, 2, true);
837 results_base += 16;
838 }
839 break;
840 }
841 case PIPE_QUERY_OCCLUSION_PREDICATE: {
842 unsigned results_base = 0;
843 while (results_base != query->result_size) {
844 result->b = result->b ||
845 r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
846 results_base += 16;
847 }
848 break;
849 }
850 case PIPE_QUERY_TIME_ELAPSED:
851 result->u64 += r600_query_read_result(buffer, 0, 2, false);
852 break;
853 case PIPE_QUERY_TIMESTAMP:
854 {
855 uint32_t *current_result = (uint32_t*)buffer;
856 result->u64 = (uint64_t)current_result[0] |
857 (uint64_t)current_result[1] << 32;
858 break;
859 }
860 case PIPE_QUERY_PRIMITIVES_EMITTED:
861 /* SAMPLE_STREAMOUTSTATS stores this structure:
862 * {
863 * u64 NumPrimitivesWritten;
864 * u64 PrimitiveStorageNeeded;
865 * }
866 * We only need NumPrimitivesWritten here. */
867 result->u64 += r600_query_read_result(buffer, 2, 6, true);
868 break;
869 case PIPE_QUERY_PRIMITIVES_GENERATED:
870 /* Here we read PrimitiveStorageNeeded. */
871 result->u64 += r600_query_read_result(buffer, 0, 4, true);
872 break;
873 case PIPE_QUERY_SO_STATISTICS:
874 result->so_statistics.num_primitives_written +=
875 r600_query_read_result(buffer, 2, 6, true);
876 result->so_statistics.primitives_storage_needed +=
877 r600_query_read_result(buffer, 0, 4, true);
878 break;
879 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
880 result->b = result->b ||
881 r600_query_read_result(buffer, 2, 6, true) !=
882 r600_query_read_result(buffer, 0, 4, true);
883 break;
884 case PIPE_QUERY_PIPELINE_STATISTICS:
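/* Begin counter i is at dword 2*i and its end counterpart at
 * 2*i + result_size/2 (in dwords): 176-byte results on EG give an offset of
 * 22 dwords, 128-byte results on R600 give 16, matching the pairs below. */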
885 if (ctx->chip_class >= EVERGREEN) {
886 result->pipeline_statistics.ps_invocations +=
887 r600_query_read_result(buffer, 0, 22, false);
888 result->pipeline_statistics.c_primitives +=
889 r600_query_read_result(buffer, 2, 24, false);
890 result->pipeline_statistics.c_invocations +=
891 r600_query_read_result(buffer, 4, 26, false);
892 result->pipeline_statistics.vs_invocations +=
893 r600_query_read_result(buffer, 6, 28, false);
894 result->pipeline_statistics.gs_invocations +=
895 r600_query_read_result(buffer, 8, 30, false);
896 result->pipeline_statistics.gs_primitives +=
897 r600_query_read_result(buffer, 10, 32, false);
898 result->pipeline_statistics.ia_primitives +=
899 r600_query_read_result(buffer, 12, 34, false);
900 result->pipeline_statistics.ia_vertices +=
901 r600_query_read_result(buffer, 14, 36, false);
902 result->pipeline_statistics.hs_invocations +=
903 r600_query_read_result(buffer, 16, 38, false);
904 result->pipeline_statistics.ds_invocations +=
905 r600_query_read_result(buffer, 18, 40, false);
906 result->pipeline_statistics.cs_invocations +=
907 r600_query_read_result(buffer, 20, 42, false);
908 } else {
909 result->pipeline_statistics.ps_invocations +=
910 r600_query_read_result(buffer, 0, 16, false);
911 result->pipeline_statistics.c_primitives +=
912 r600_query_read_result(buffer, 2, 18, false);
913 result->pipeline_statistics.c_invocations +=
914 r600_query_read_result(buffer, 4, 20, false);
915 result->pipeline_statistics.vs_invocations +=
916 r600_query_read_result(buffer, 6, 22, false);
917 result->pipeline_statistics.gs_invocations +=
918 r600_query_read_result(buffer, 8, 24, false);
919 result->pipeline_statistics.gs_primitives +=
920 r600_query_read_result(buffer, 10, 26, false);
921 result->pipeline_statistics.ia_primitives +=
922 r600_query_read_result(buffer, 12, 28, false);
923 result->pipeline_statistics.ia_vertices +=
924 r600_query_read_result(buffer, 14, 30, false);
925 }
926 #if 0 /* for testing */
927 printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
928 "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
929 "Clipper prims=%llu, PS=%llu, CS=%llu\n",
930 result->pipeline_statistics.ia_vertices,
931 result->pipeline_statistics.ia_primitives,
932 result->pipeline_statistics.vs_invocations,
933 result->pipeline_statistics.hs_invocations,
934 result->pipeline_statistics.ds_invocations,
935 result->pipeline_statistics.gs_invocations,
936 result->pipeline_statistics.gs_primitives,
937 result->pipeline_statistics.c_invocations,
938 result->pipeline_statistics.c_primitives,
939 result->pipeline_statistics.ps_invocations,
940 result->pipeline_statistics.cs_invocations);
941 #endif
942 break;
943 default:
944 assert(0);
945 }
946 }
947
948 static boolean r600_get_query_result(struct pipe_context *ctx,
949 struct pipe_query *query, boolean wait,
950 union pipe_query_result *result)
951 {
952 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
953 struct r600_query *rquery = (struct r600_query *)query;
954
955 return rquery->ops->get_result(rctx, rquery, wait, result);
956 }
957
958 static void r600_query_hw_clear_result(struct r600_query_hw *query,
959 union pipe_query_result *result)
960 {
961 util_query_clear_result(result, query->b.type);
962 }
963
964 bool r600_query_hw_get_result(struct r600_common_context *rctx,
965 struct r600_query *rquery,
966 bool wait, union pipe_query_result *result)
967 {
968 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
969 struct r600_query_buffer *qbuf;
970
971 query->ops->clear_result(query, result);
972
973 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
974 unsigned results_base = 0;
975 void *map;
976
977 map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
978 PIPE_TRANSFER_READ |
979 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
980 if (!map)
981 return false;
982
983 while (results_base != qbuf->results_end) {
984 query->ops->add_result(rctx, query, map + results_base,
985 result);
986 results_base += query->result_size;
987 }
988 }
989
990 /* Convert the time to expected units. */
991 if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
992 rquery->type == PIPE_QUERY_TIMESTAMP) {
993 result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
994 }
995 return true;
996 }
997
998 static void r600_render_condition(struct pipe_context *ctx,
999 struct pipe_query *query,
1000 boolean condition,
1001 uint mode)
1002 {
1003 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1004 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
1005 struct r600_query_buffer *qbuf;
1006 struct r600_atom *atom = &rctx->render_cond_atom;
1007
1008 rctx->render_cond = query;
1009 rctx->render_cond_invert = condition;
1010 rctx->render_cond_mode = mode;
1011
1012 /* Compute the size of SET_PREDICATION packets. */
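/* Each results block is assumed to cost 5 dwords: 3 for SET_PREDICATION plus
 * 2 for the relocation emitted with it in r600_emit_query_predication(). */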
1013 atom->num_dw = 0;
1014 if (query) {
1015 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
1016 atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
1017 }
1018
1019 rctx->set_atom_dirty(rctx, atom, query != NULL);
1020 }
1021
1022 void r600_suspend_queries(struct r600_common_context *ctx)
1023 {
1024 struct r600_query_hw *query;
1025
1026 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1027 r600_query_hw_emit_stop(ctx, query);
1028 }
1029 assert(ctx->num_cs_dw_queries_suspend == 0);
1030 }
1031
1032 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
1033 struct list_head *query_list)
1034 {
1035 struct r600_query_hw *query;
1036 unsigned num_dw = 0;
1037
1038 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1039 /* begin + end */
1040 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1041
1042 /* Workaround for the fact that
1043 * num_cs_dw_queries_suspend is incremented for every
1044 * resumed query, which raises the bar in need_cs_space for
1045 * queries about to be resumed.
1046 */
1047 num_dw += query->num_cs_dw_end;
1048 }
1049 /* primitives generated query */
1050 num_dw += ctx->streamout.enable_atom.num_dw;
1051 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
1052 num_dw += 13;
1053
1054 return num_dw;
1055 }
1056
1057 void r600_resume_queries(struct r600_common_context *ctx)
1058 {
1059 struct r600_query_hw *query;
1060 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
1061
1062 assert(ctx->num_cs_dw_queries_suspend == 0);
1063
1064 /* Check CS space here. Resuming must not be interrupted by flushes. */
1065 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
1066
1067 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1068 r600_query_hw_emit_start(ctx, query);
1069 }
1070 }
1071
1072 /* Get the backend mask. */
1073 void r600_query_init_backend_mask(struct r600_common_context *ctx)
1074 {
1075 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1076 struct r600_resource *buffer;
1077 uint32_t *results;
1078 unsigned num_backends = ctx->screen->info.num_render_backends;
1079 unsigned i, mask = 0;
1080
1081 /* if backend_map query is supported by the kernel */
1082 if (ctx->screen->info.r600_gb_backend_map_valid) {
1083 unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
1084 unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
1085 unsigned item_width, item_mask;
1086
1087 if (ctx->chip_class >= EVERGREEN) {
1088 item_width = 4;
1089 item_mask = 0x7;
1090 } else {
1091 item_width = 2;
1092 item_mask = 0x3;
1093 }
1094
1095 while (num_tile_pipes--) {
1096 i = backend_map & item_mask;
1097 mask |= (1<<i);
1098 backend_map >>= item_width;
1099 }
1100 if (mask != 0) {
1101 ctx->backend_mask = mask;
1102 return;
1103 }
1104 }
1105
1106 /* otherwise, fallback path for older kernels */
1107
1108 /* create buffer for event data */
1109 buffer = (struct r600_resource*)
1110 pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
1111 PIPE_USAGE_STAGING, ctx->max_db*16);
1112 if (!buffer)
1113 goto err;
1114
1115 /* initialize buffer with zeroes */
1116 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
1117 if (results) {
1118 memset(results, 0, ctx->max_db * 4 * 4);
1119
1120 /* emit EVENT_WRITE for ZPASS_DONE */
1121 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1122 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
1123 radeon_emit(cs, buffer->gpu_address);
1124 radeon_emit(cs, buffer->gpu_address >> 32);
1125
1126 r600_emit_reloc(ctx, &ctx->gfx, buffer,
1127 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
1128
1129 /* analyze results */
1130 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
1131 if (results) {
1132 for(i = 0; i < ctx->max_db; i++) {
1133 /* at least the highest bit will be set if the backend is used */
1134 if (results[i*4 + 1])
1135 mask |= (1<<i);
1136 }
1137 }
1138 }
1139
1140 r600_resource_reference(&buffer, NULL);
1141
1142 if (mask != 0) {
1143 ctx->backend_mask = mask;
1144 return;
1145 }
1146
1147 err:
1148 /* fallback to the old method - set the lowest num_backends bits to 1 */
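/* e.g. num_backends = 8 -> backend_mask = 0xff */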
1149 ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
1150 return;
1151 }
1152
1153 #define XFULL(name_, query_type_, type_, result_type_, group_id_) \
1154 { \
1155 .name = name_, \
1156 .query_type = R600_QUERY_##query_type_, \
1157 .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
1158 .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
1159 .group_id = group_id_ \
1160 }
1161
1162 #define X(name_, query_type_, type_, result_type_) \
1163 XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
1164
1165 #define XG(group_, name_, query_type_, type_, result_type_) \
1166 XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
1167
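/* Example expansion: X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) becomes
 * { .name = "draw-calls", .query_type = R600_QUERY_DRAW_CALLS,
 * .type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 * .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 * .group_id = ~(unsigned)0 } */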
1168 static struct pipe_driver_query_info r600_driver_query_list[] = {
1169 X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
1170 X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
1171 X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
1172 X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
1173 X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
1174 X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
1175 X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
1176 X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
1177 X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
1178 X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
1179 X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
1180 X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
1181 X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
1182 X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
1183 X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
1184
1185 /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
1186 * which use them as a fallback path to detect the GPU type.
1187 *
1188 * Note: The names of these queries are significant for GPUPerfStudio
1189 * (and possibly their order as well). */
1190 XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
1191 XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
1192 XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
1193 XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
1194 XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),
1195
1196 /* The following queries must be at the end of the list because their
1197 * availability is adjusted dynamically based on the DRM version. */
1198 X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
1199 X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
1200 X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
1201 X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
1202 };
1203
1204 #undef X
1205 #undef XG
1206 #undef XFULL
1207
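/* The last four entries of r600_driver_query_list (GPU-load, temperature,
 * shader-clock, memory-clock) are only exposed when the DRM version can back
 * them: radeon DRM 2.42+ gets all of them, amdgpu (DRM major 3) drops the
 * last three, older kernels drop all four. */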
1208 static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
1209 {
1210 if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
1211 return ARRAY_SIZE(r600_driver_query_list);
1212 else if (rscreen->info.drm_major == 3)
1213 return ARRAY_SIZE(r600_driver_query_list) - 3;
1214 else
1215 return ARRAY_SIZE(r600_driver_query_list) - 4;
1216 }
1217
1218 static int r600_get_driver_query_info(struct pipe_screen *screen,
1219 unsigned index,
1220 struct pipe_driver_query_info *info)
1221 {
1222 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
1223 unsigned num_queries = r600_get_num_queries(rscreen);
1224
1225 if (!info) {
1226 unsigned num_perfcounters =
1227 r600_get_perfcounter_info(rscreen, 0, NULL);
1228
1229 return num_queries + num_perfcounters;
1230 }
1231
1232 if (index >= num_queries)
1233 return r600_get_perfcounter_info(rscreen, index - num_queries, info);
1234
1235 *info = r600_driver_query_list[index];
1236
1237 switch (info->query_type) {
1238 case R600_QUERY_REQUESTED_VRAM:
1239 case R600_QUERY_VRAM_USAGE:
1240 info->max_value.u64 = rscreen->info.vram_size;
1241 break;
1242 case R600_QUERY_REQUESTED_GTT:
1243 case R600_QUERY_GTT_USAGE:
1244 info->max_value.u64 = rscreen->info.gart_size;
1245 break;
1246 case R600_QUERY_GPU_TEMPERATURE:
1247 info->max_value.u64 = 125;
1248 break;
1249 }
1250
1251 if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
1252 info->group_id += rscreen->perfcounters->num_groups;
1253
1254 return 1;
1255 }
1256
1257 /* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
1258 * performance counter groups, so be careful when changing this and related
1259 * functions.
1260 */
1261 static int r600_get_driver_query_group_info(struct pipe_screen *screen,
1262 unsigned index,
1263 struct pipe_driver_query_group_info *info)
1264 {
1265 struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
1266 unsigned num_pc_groups = 0;
1267
1268 if (rscreen->perfcounters)
1269 num_pc_groups = rscreen->perfcounters->num_groups;
1270
1271 if (!info)
1272 return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
1273
1274 if (index < num_pc_groups)
1275 return r600_get_perfcounter_group_info(rscreen, index, info);
1276
1277 index -= num_pc_groups;
1278 if (index >= R600_NUM_SW_QUERY_GROUPS)
1279 return 0;
1280
1281 info->name = "GPIN";
1282 info->max_active_queries = 5;
1283 info->num_queries = 5;
1284 return 1;
1285 }
1286
1287 void r600_query_init(struct r600_common_context *rctx)
1288 {
1289 rctx->b.create_query = r600_create_query;
1290 rctx->b.create_batch_query = r600_create_batch_query;
1291 rctx->b.destroy_query = r600_destroy_query;
1292 rctx->b.begin_query = r600_begin_query;
1293 rctx->b.end_query = r600_end_query;
1294 rctx->b.get_query_result = r600_get_query_result;
1295 rctx->render_cond_atom.emit = r600_emit_query_predication;
1296
1297 if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
1298 rctx->b.render_condition = r600_render_condition;
1299
1300 LIST_INITHEAD(&rctx->active_queries);
1301 }
1302
1303 void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
1304 {
1305 rscreen->b.get_driver_query_info = r600_get_driver_query_info;
1306 rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
1307 }