[mesa.git] / src / gallium / drivers / radeon / r600_query.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "r600_query.h"
26 #include "r600_cs.h"
27 #include "util/u_memory.h"
28
29 /* Queries without buffer handling or suspend/resume. */
30 struct r600_query_sw {
31 struct r600_query b;
32
33 uint64_t begin_result;
34 uint64_t end_result;
35 /* Fence for GPU_FINISHED. */
36 struct pipe_fence_handle *fence;
37 };
38
39 static void r600_query_sw_destroy(struct r600_common_context *rctx,
40 struct r600_query *rquery)
41 {
42 struct pipe_screen *screen = rctx->b.screen;
43 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
44
45 screen->fence_reference(screen, &query->fence, NULL);
46 FREE(query);
47 }
48
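/* Map a driver-specific query type to the winsys counter that backs it. */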
49 static enum radeon_value_id winsys_id_from_type(unsigned type)
50 {
51 switch (type) {
52 case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
53 case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
54 case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
55 case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
56 case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
57 case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
58 case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
59 case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
60 case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
61 case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
62 default: unreachable("query type does not correspond to winsys id");
63 }
64 }
65
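/* Record the counter value at query begin; most software query results are
 * reported as end - begin.
 */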
66 static boolean r600_query_sw_begin(struct r600_common_context *rctx,
67 struct r600_query *rquery)
68 {
69 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
70
71 switch(query->b.type) {
72 case PIPE_QUERY_TIMESTAMP_DISJOINT:
73 case PIPE_QUERY_GPU_FINISHED:
74 break;
75 case R600_QUERY_DRAW_CALLS:
76 query->begin_result = rctx->num_draw_calls;
77 break;
78 case R600_QUERY_SPILL_DRAW_CALLS:
79 query->begin_result = rctx->num_spill_draw_calls;
80 break;
81 case R600_QUERY_COMPUTE_CALLS:
82 query->begin_result = rctx->num_compute_calls;
83 break;
84 case R600_QUERY_SPILL_COMPUTE_CALLS:
85 query->begin_result = rctx->num_spill_compute_calls;
86 break;
87 case R600_QUERY_DMA_CALLS:
88 query->begin_result = rctx->num_dma_calls;
89 break;
90 case R600_QUERY_REQUESTED_VRAM:
91 case R600_QUERY_REQUESTED_GTT:
92 case R600_QUERY_VRAM_USAGE:
93 case R600_QUERY_GTT_USAGE:
94 case R600_QUERY_GPU_TEMPERATURE:
95 case R600_QUERY_CURRENT_GPU_SCLK:
96 case R600_QUERY_CURRENT_GPU_MCLK:
97 query->begin_result = 0;
98 break;
99 case R600_QUERY_BUFFER_WAIT_TIME:
100 case R600_QUERY_NUM_CS_FLUSHES:
101 case R600_QUERY_NUM_BYTES_MOVED: {
102 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
103 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
104 break;
105 }
106 case R600_QUERY_GPU_LOAD:
107 query->begin_result = r600_gpu_load_begin(rctx->screen);
108 break;
109 case R600_QUERY_NUM_COMPILATIONS:
110 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
111 break;
112 case R600_QUERY_NUM_SHADERS_CREATED:
113 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
114 break;
115 case R600_QUERY_GPIN_ASIC_ID:
116 case R600_QUERY_GPIN_NUM_SIMD:
117 case R600_QUERY_GPIN_NUM_RB:
118 case R600_QUERY_GPIN_NUM_SPI:
119 case R600_QUERY_GPIN_NUM_SE:
120 break;
121 default:
122 unreachable("r600_query_sw_begin: bad query type");
123 }
124
125 return TRUE;
126 }
127
128 static bool r600_query_sw_end(struct r600_common_context *rctx,
129 struct r600_query *rquery)
130 {
131 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
132
133 switch(query->b.type) {
134 case PIPE_QUERY_TIMESTAMP_DISJOINT:
135 break;
136 case PIPE_QUERY_GPU_FINISHED:
137 rctx->b.flush(&rctx->b, &query->fence, 0);
138 break;
139 case R600_QUERY_DRAW_CALLS:
140 query->end_result = rctx->num_draw_calls;
141 break;
142 case R600_QUERY_SPILL_DRAW_CALLS:
143 query->end_result = rctx->num_spill_draw_calls;
144 break;
145 case R600_QUERY_COMPUTE_CALLS:
146 query->end_result = rctx->num_compute_calls;
147 break;
148 case R600_QUERY_SPILL_COMPUTE_CALLS:
149 query->end_result = rctx->num_spill_compute_calls;
150 break;
151 case R600_QUERY_DMA_CALLS:
152 query->end_result = rctx->num_dma_calls;
153 break;
154 case R600_QUERY_REQUESTED_VRAM:
155 case R600_QUERY_REQUESTED_GTT:
156 case R600_QUERY_VRAM_USAGE:
157 case R600_QUERY_GTT_USAGE:
158 case R600_QUERY_GPU_TEMPERATURE:
159 case R600_QUERY_CURRENT_GPU_SCLK:
160 case R600_QUERY_CURRENT_GPU_MCLK:
161 case R600_QUERY_BUFFER_WAIT_TIME:
162 case R600_QUERY_NUM_CS_FLUSHES:
163 case R600_QUERY_NUM_BYTES_MOVED: {
164 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
165 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
166 break;
167 }
168 case R600_QUERY_GPU_LOAD:
169 query->end_result = r600_gpu_load_end(rctx->screen,
170 query->begin_result);
171 query->begin_result = 0;
172 break;
173 case R600_QUERY_NUM_COMPILATIONS:
174 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
175 break;
176 case R600_QUERY_NUM_SHADERS_CREATED:
177 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
178 break;
179 case R600_QUERY_GPIN_ASIC_ID:
180 case R600_QUERY_GPIN_NUM_SIMD:
181 case R600_QUERY_GPIN_NUM_RB:
182 case R600_QUERY_GPIN_NUM_SPI:
183 case R600_QUERY_GPIN_NUM_SE:
184 break;
185 default:
186 unreachable("r600_query_sw_end: bad query type");
187 }
188
189 return true;
190 }
191
192 static boolean r600_query_sw_get_result(struct r600_common_context *rctx,
193 struct r600_query *rquery,
194 boolean wait,
195 union pipe_query_result *result)
196 {
197 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
198
199 switch (query->b.type) {
200 case PIPE_QUERY_TIMESTAMP_DISJOINT:
201 /* Convert from cycles per millisecond to cycles per second (Hz). */
202 result->timestamp_disjoint.frequency =
203 (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
204 result->timestamp_disjoint.disjoint = FALSE;
205 return TRUE;
206 case PIPE_QUERY_GPU_FINISHED: {
207 struct pipe_screen *screen = rctx->b.screen;
208 result->b = screen->fence_finish(screen, query->fence,
209 wait ? PIPE_TIMEOUT_INFINITE : 0);
210 return result->b;
211 }
212
213 case R600_QUERY_GPIN_ASIC_ID:
214 result->u32 = 0;
215 return TRUE;
216 case R600_QUERY_GPIN_NUM_SIMD:
217 result->u32 = rctx->screen->info.num_good_compute_units;
218 return TRUE;
219 case R600_QUERY_GPIN_NUM_RB:
220 result->u32 = rctx->screen->info.num_render_backends;
221 return TRUE;
222 case R600_QUERY_GPIN_NUM_SPI:
223 result->u32 = 1; /* all supported chips have one SPI per SE */
224 return TRUE;
225 case R600_QUERY_GPIN_NUM_SE:
226 result->u32 = rctx->screen->info.max_se;
227 return TRUE;
228 }
229
230 result->u64 = query->end_result - query->begin_result;
231
232 switch (query->b.type) {
233 case R600_QUERY_BUFFER_WAIT_TIME:
234 case R600_QUERY_GPU_TEMPERATURE:
235 result->u64 /= 1000;
236 break;
237 case R600_QUERY_CURRENT_GPU_SCLK:
238 case R600_QUERY_CURRENT_GPU_MCLK:
239 result->u64 *= 1000000;
240 break;
241 }
242
243 return TRUE;
244 }
245
246 static struct r600_query_ops sw_query_ops = {
247 .destroy = r600_query_sw_destroy,
248 .begin = r600_query_sw_begin,
249 .end = r600_query_sw_end,
250 .get_result = r600_query_sw_get_result
251 };
252
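/* Create a query that is serviced entirely in software, without a GPU buffer. */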
253 static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
254 unsigned query_type)
255 {
256 struct r600_query_sw *query;
257
258 query = CALLOC_STRUCT(r600_query_sw);
259 if (!query)
260 return NULL;
261
262 query->b.type = query_type;
263 query->b.ops = &sw_query_ops;
264
265 return (struct pipe_query *)query;
266 }
267
268 void r600_query_hw_destroy(struct r600_common_context *rctx,
269 struct r600_query *rquery)
270 {
271 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
272 struct r600_query_buffer *prev = query->buffer.previous;
273
274 /* Release all query buffers. */
275 while (prev) {
276 struct r600_query_buffer *qbuf = prev;
277 prev = prev->previous;
278 pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
279 FREE(qbuf);
280 }
281
282 pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
283 FREE(rquery);
284 }
285
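/* Allocate a query result buffer of at least one GART page. Predicate-capable
 * queries also get the buffer pre-initialized.
 */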
286 static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
287 struct r600_query_hw *query)
288 {
289 unsigned buf_size = MAX2(query->result_size,
290 ctx->screen->info.gart_page_size);
291
292 /* Queries are normally read by the CPU after
 293 * being written by the GPU, hence staging is probably a good
294 * usage pattern.
295 */
296 struct r600_resource *buf = (struct r600_resource*)
297 pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
298 PIPE_USAGE_STAGING, buf_size);
299 if (!buf)
300 return NULL;
301
302 if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
303 if (!query->ops->prepare_buffer(ctx, query, buf)) {
304 pipe_resource_reference((struct pipe_resource **)&buf, NULL);
305 return NULL;
306 }
307 }
308
309 return buf;
310 }
311
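/* Clear a new result buffer. For occlusion queries, pre-set the top bits of
 * the begin/end slots belonging to disabled render backends, since those
 * backends never write results.
 */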
312 static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
313 struct r600_query_hw *query,
314 struct r600_resource *buffer)
315 {
316 /* Callers ensure that the buffer is currently unused by the GPU. */
317 uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
318 PIPE_TRANSFER_WRITE |
319 PIPE_TRANSFER_UNSYNCHRONIZED);
320 if (!results)
321 return false;
322
323 memset(results, 0, buffer->b.b.width0);
324
325 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
326 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
327 unsigned num_results;
328 unsigned i, j;
329
330 /* Set top bits for unused backends. */
331 num_results = buffer->b.b.width0 / (16 * ctx->max_db);
332 for (j = 0; j < num_results; j++) {
333 for (i = 0; i < ctx->max_db; i++) {
334 if (!(ctx->backend_mask & (1<<i))) {
335 results[(i * 4)+1] = 0x80000000;
336 results[(i * 4)+3] = 0x80000000;
337 }
338 }
339 results += 4 * ctx->max_db;
340 }
341 }
342
343 return true;
344 }
345
346 static struct r600_query_ops query_hw_ops = {
347 .destroy = r600_query_hw_destroy,
348 .begin = r600_query_hw_begin,
349 .end = r600_query_hw_end,
350 .get_result = r600_query_hw_get_result,
351 };
352
353 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
354 struct r600_query_hw *query,
355 struct r600_resource *buffer,
356 uint64_t va);
357 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
358 struct r600_query_hw *query,
359 struct r600_resource *buffer,
360 uint64_t va);
361 static void r600_query_hw_add_result(struct r600_common_context *ctx,
362 struct r600_query_hw *, void *buffer,
363 union pipe_query_result *result);
364 static void r600_query_hw_clear_result(struct r600_query_hw *,
365 union pipe_query_result *);
366
367 static struct r600_query_hw_ops query_hw_default_hw_ops = {
368 .prepare_buffer = r600_query_hw_prepare_buffer,
369 .emit_start = r600_query_hw_do_emit_start,
370 .emit_stop = r600_query_hw_do_emit_stop,
371 .clear_result = r600_query_hw_clear_result,
372 .add_result = r600_query_hw_add_result,
373 };
374
375 boolean r600_query_hw_init(struct r600_common_context *rctx,
376 struct r600_query_hw *query)
377 {
378 query->buffer.buf = r600_new_query_buffer(rctx, query);
379 if (!query->buffer.buf)
380 return FALSE;
381
382 return TRUE;
383 }
384
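/* Create a hardware query; the result size and the CS space needed for
 * begin/end packets depend on the query type.
 */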
385 static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
386 unsigned query_type,
387 unsigned index)
388 {
389 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
390 if (!query)
391 return NULL;
392
393 query->b.type = query_type;
394 query->b.ops = &query_hw_ops;
395 query->ops = &query_hw_default_hw_ops;
396
397 switch (query_type) {
398 case PIPE_QUERY_OCCLUSION_COUNTER:
399 case PIPE_QUERY_OCCLUSION_PREDICATE:
400 query->result_size = 16 * rctx->max_db;
401 query->num_cs_dw_begin = 6;
402 query->num_cs_dw_end = 6;
403 query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
404 break;
405 case PIPE_QUERY_TIME_ELAPSED:
406 query->result_size = 16;
407 query->num_cs_dw_begin = 8;
408 query->num_cs_dw_end = 8;
409 break;
410 case PIPE_QUERY_TIMESTAMP:
411 query->result_size = 8;
412 query->num_cs_dw_end = 8;
413 query->flags = R600_QUERY_HW_FLAG_NO_START;
414 break;
415 case PIPE_QUERY_PRIMITIVES_EMITTED:
416 case PIPE_QUERY_PRIMITIVES_GENERATED:
417 case PIPE_QUERY_SO_STATISTICS:
418 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
419 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
420 query->result_size = 32;
421 query->num_cs_dw_begin = 6;
422 query->num_cs_dw_end = 6;
423 query->stream = index;
424 query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
425 break;
426 case PIPE_QUERY_PIPELINE_STATISTICS:
427 /* 11 values on EG, 8 on R600. */
428 query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
429 query->num_cs_dw_begin = 6;
430 query->num_cs_dw_end = 6;
431 break;
432 default:
433 assert(0);
434 FREE(query);
435 return NULL;
436 }
437
438 if (!r600_query_hw_init(rctx, query)) {
439 FREE(query);
440 return NULL;
441 }
442
443 return (struct pipe_query *)query;
444 }
445
446 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
447 unsigned type, int diff)
448 {
449 if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
450 type == PIPE_QUERY_OCCLUSION_PREDICATE) {
451 bool old_enable = rctx->num_occlusion_queries != 0;
452 bool old_perfect_enable =
453 rctx->num_perfect_occlusion_queries != 0;
454 bool enable, perfect_enable;
455
456 rctx->num_occlusion_queries += diff;
457 assert(rctx->num_occlusion_queries >= 0);
458
459 if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
460 rctx->num_perfect_occlusion_queries += diff;
461 assert(rctx->num_perfect_occlusion_queries >= 0);
462 }
463
464 enable = rctx->num_occlusion_queries != 0;
465 perfect_enable = rctx->num_perfect_occlusion_queries != 0;
466
467 if (enable != old_enable || perfect_enable != old_perfect_enable) {
468 rctx->set_occlusion_query_state(&rctx->b, enable);
469 }
470 }
471 }
472
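/* Select the SAMPLE_STREAMOUTSTATS event that matches the query's vertex stream. */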
473 static unsigned event_type_for_stream(struct r600_query_hw *query)
474 {
475 switch (query->stream) {
476 default:
477 case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
478 case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
479 case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
480 case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
481 }
482 }
483
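/* Emit the packets that make the GPU write the "begin" half of the result at va. */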
484 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
485 struct r600_query_hw *query,
486 struct r600_resource *buffer,
487 uint64_t va)
488 {
489 struct radeon_winsys_cs *cs = ctx->gfx.cs;
490
491 switch (query->b.type) {
492 case PIPE_QUERY_OCCLUSION_COUNTER:
493 case PIPE_QUERY_OCCLUSION_PREDICATE:
494 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
495 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
496 radeon_emit(cs, va);
497 radeon_emit(cs, (va >> 32) & 0xFFFF);
498 break;
499 case PIPE_QUERY_PRIMITIVES_EMITTED:
500 case PIPE_QUERY_PRIMITIVES_GENERATED:
501 case PIPE_QUERY_SO_STATISTICS:
502 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
503 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
504 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
505 radeon_emit(cs, va);
506 radeon_emit(cs, (va >> 32) & 0xFFFF);
507 break;
508 case PIPE_QUERY_TIME_ELAPSED:
509 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
510 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
511 radeon_emit(cs, va);
512 radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
513 radeon_emit(cs, 0);
514 radeon_emit(cs, 0);
515 break;
516 case PIPE_QUERY_PIPELINE_STATISTICS:
517 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
518 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
519 radeon_emit(cs, va);
520 radeon_emit(cs, (va >> 32) & 0xFFFF);
521 break;
522 default:
523 assert(0);
524 }
525 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
526 RADEON_PRIO_QUERY);
527 }
528
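/* Begin a hardware query: reserve CS space, switch to a fresh result buffer
 * if the current one is full, and emit the "begin" packets.
 */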
529 static void r600_query_hw_emit_start(struct r600_common_context *ctx,
530 struct r600_query_hw *query)
531 {
532 uint64_t va;
533
534 if (!query->buffer.buf)
 535 return; /* previous buffer allocation failure */
536
537 r600_update_occlusion_query_state(ctx, query->b.type, 1);
538 r600_update_prims_generated_query_state(ctx, query->b.type, 1);
539
540 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
541 TRUE);
542
543 /* Get a new query buffer if needed. */
544 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
545 struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
546 *qbuf = query->buffer;
547 query->buffer.results_end = 0;
548 query->buffer.previous = qbuf;
549 query->buffer.buf = r600_new_query_buffer(ctx, query);
550 if (!query->buffer.buf)
551 return;
552 }
553
554 /* emit begin query */
555 va = query->buffer.buf->gpu_address + query->buffer.results_end;
556
557 query->ops->emit_start(ctx, query, query->buffer.buf, va);
558
559 ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
560 }
561
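/* Emit the packets that make the GPU write the "end" half of the result at va. */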
562 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
563 struct r600_query_hw *query,
564 struct r600_resource *buffer,
565 uint64_t va)
566 {
567 struct radeon_winsys_cs *cs = ctx->gfx.cs;
568
569 switch (query->b.type) {
570 case PIPE_QUERY_OCCLUSION_COUNTER:
571 case PIPE_QUERY_OCCLUSION_PREDICATE:
572 va += 8;
573 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
574 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
575 radeon_emit(cs, va);
576 radeon_emit(cs, (va >> 32) & 0xFFFF);
577 break;
578 case PIPE_QUERY_PRIMITIVES_EMITTED:
579 case PIPE_QUERY_PRIMITIVES_GENERATED:
580 case PIPE_QUERY_SO_STATISTICS:
581 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
582 va += query->result_size/2;
583 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
584 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
585 radeon_emit(cs, va);
586 radeon_emit(cs, (va >> 32) & 0xFFFF);
587 break;
588 case PIPE_QUERY_TIME_ELAPSED:
589 va += query->result_size/2;
590 /* fall through */
591 case PIPE_QUERY_TIMESTAMP:
592 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
593 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
594 radeon_emit(cs, va);
595 radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
596 radeon_emit(cs, 0);
597 radeon_emit(cs, 0);
598 break;
599 case PIPE_QUERY_PIPELINE_STATISTICS:
600 va += query->result_size/2;
601 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
602 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
603 radeon_emit(cs, va);
604 radeon_emit(cs, (va >> 32) & 0xFFFF);
605 break;
606 default:
607 assert(0);
608 }
609 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
610 RADEON_PRIO_QUERY);
611 }
612
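/* End a hardware query: emit the "end" packets and advance results_end past
 * the completed result.
 */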
613 static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
614 struct r600_query_hw *query)
615 {
616 uint64_t va;
617
618 if (!query->buffer.buf)
 619 return; /* previous buffer allocation failure */
620
 621 /* Queries that need a begin have already reserved CS space in begin_query. */
622 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
623 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, FALSE);
624 }
625
626 /* emit end query */
627 va = query->buffer.buf->gpu_address + query->buffer.results_end;
628
629 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
630
631 query->buffer.results_end += query->result_size;
632
633 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
634 ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
635
636 r600_update_occlusion_query_state(ctx, query->b.type, -1);
637 r600_update_prims_generated_query_state(ctx, query->b.type, -1);
638 }
639
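/* Emit SET_PREDICATION packets covering every result block of the query that
 * drives conditional rendering.
 */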
640 static void r600_emit_query_predication(struct r600_common_context *ctx,
641 struct r600_atom *atom)
642 {
643 struct radeon_winsys_cs *cs = ctx->gfx.cs;
644 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
645 struct r600_query_buffer *qbuf;
646 uint32_t op;
647 bool flag_wait;
648
649 if (!query)
650 return;
651
652 flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
653 ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
654
655 switch (query->b.type) {
656 case PIPE_QUERY_OCCLUSION_COUNTER:
657 case PIPE_QUERY_OCCLUSION_PREDICATE:
658 op = PRED_OP(PREDICATION_OP_ZPASS);
659 break;
660 case PIPE_QUERY_PRIMITIVES_EMITTED:
661 case PIPE_QUERY_PRIMITIVES_GENERATED:
662 case PIPE_QUERY_SO_STATISTICS:
663 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
664 op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
665 break;
666 default:
667 assert(0);
668 return;
669 }
670
671 /* if true then invert, see GL_ARB_conditional_render_inverted */
672 if (ctx->render_cond_invert)
 673 op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
 674 else
 675 op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */
676
677 op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
678
679 /* emit predicate packets for all data blocks */
680 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
681 unsigned results_base = 0;
682 uint64_t va = qbuf->buf->gpu_address;
683
684 while (results_base < qbuf->results_end) {
685 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
686 radeon_emit(cs, va + results_base);
687 radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
688 r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
689 RADEON_PRIO_QUERY);
690 results_base += query->result_size;
691
692 /* set CONTINUE bit for all packets except the first */
693 op |= PREDICATION_CONTINUE;
694 }
695 }
696 }
697
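/* TIMESTAMP_DISJOINT, GPU_FINISHED and driver-specific query types are handled
 * in software; everything else is a hardware query with a GPU result buffer.
 */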
698 static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
699 {
700 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
701
702 if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
703 query_type == PIPE_QUERY_GPU_FINISHED ||
704 query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
705 return r600_query_sw_create(ctx, query_type);
706
707 return r600_query_hw_create(rctx, query_type, index);
708 }
709
710 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
711 {
712 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
713 struct r600_query *rquery = (struct r600_query *)query;
714
715 rquery->ops->destroy(rctx, rquery);
716 }
717
718 static boolean r600_begin_query(struct pipe_context *ctx,
719 struct pipe_query *query)
720 {
721 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
722 struct r600_query *rquery = (struct r600_query *)query;
723
724 return rquery->ops->begin(rctx, rquery);
725 }
726
727 static void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
728 struct r600_query_hw *query)
729 {
730 struct r600_query_buffer *prev = query->buffer.previous;
731
732 /* Discard the old query buffers. */
733 while (prev) {
734 struct r600_query_buffer *qbuf = prev;
735 prev = prev->previous;
736 pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
737 FREE(qbuf);
738 }
739
740 query->buffer.results_end = 0;
741 query->buffer.previous = NULL;
742
743 if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
744 /* Obtain a new buffer if the current one can't be mapped without a stall. */
745 if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
746 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
747 pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
748 query->buffer.buf = r600_new_query_buffer(rctx, query);
749 } else {
750 if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
751 pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
752 }
753 }
754 }
755
756 boolean r600_query_hw_begin(struct r600_common_context *rctx,
757 struct r600_query *rquery)
758 {
759 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
760
761 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
762 assert(0);
763 return false;
764 }
765
766 r600_query_hw_reset_buffers(rctx, query);
767
768 r600_query_hw_emit_start(rctx, query);
769 if (!query->buffer.buf)
770 return false;
771
772 LIST_ADDTAIL(&query->list, &rctx->active_queries);
773 return true;
774 }
775
776 static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
777 {
778 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
779 struct r600_query *rquery = (struct r600_query *)query;
780
781 return rquery->ops->end(rctx, rquery);
782 }
783
784 bool r600_query_hw_end(struct r600_common_context *rctx,
785 struct r600_query *rquery)
786 {
787 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
788
789 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
790 r600_query_hw_reset_buffers(rctx, query);
791
792 r600_query_hw_emit_stop(rctx, query);
793
794 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
795 LIST_DELINIT(&query->list);
796
797 if (!query->buffer.buf)
798 return false;
799
800 return true;
801 }
802
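/* Read one 64-bit begin/end pair from a mapped result buffer and return
 * end - start. With test_status_bit, the pair only counts when both values
 * have their top ("result written") bit set.
 */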
803 static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
804 bool test_status_bit)
805 {
806 uint32_t *current_result = (uint32_t*)map;
807 uint64_t start, end;
808
809 start = (uint64_t)current_result[start_index] |
810 (uint64_t)current_result[start_index+1] << 32;
811 end = (uint64_t)current_result[end_index] |
812 (uint64_t)current_result[end_index+1] << 32;
813
814 if (!test_status_bit ||
815 ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
816 return end - start;
817 }
818 return 0;
819 }
820
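/* Accumulate one result block from the mapped buffer into *result. */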
821 static void r600_query_hw_add_result(struct r600_common_context *ctx,
822 struct r600_query_hw *query,
823 void *buffer,
824 union pipe_query_result *result)
825 {
826 switch (query->b.type) {
827 case PIPE_QUERY_OCCLUSION_COUNTER: {
828 unsigned results_base = 0;
829 while (results_base != query->result_size) {
830 result->u64 +=
831 r600_query_read_result(buffer + results_base, 0, 2, true);
832 results_base += 16;
833 }
834 break;
835 }
836 case PIPE_QUERY_OCCLUSION_PREDICATE: {
837 unsigned results_base = 0;
838 while (results_base != query->result_size) {
839 result->b = result->b ||
840 r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
841 results_base += 16;
842 }
843 break;
844 }
845 case PIPE_QUERY_TIME_ELAPSED:
846 result->u64 += r600_query_read_result(buffer, 0, 2, false);
847 break;
848 case PIPE_QUERY_TIMESTAMP:
849 {
850 uint32_t *current_result = (uint32_t*)buffer;
851 result->u64 = (uint64_t)current_result[0] |
852 (uint64_t)current_result[1] << 32;
853 break;
854 }
855 case PIPE_QUERY_PRIMITIVES_EMITTED:
856 /* SAMPLE_STREAMOUTSTATS stores this structure:
857 * {
858 * u64 NumPrimitivesWritten;
859 * u64 PrimitiveStorageNeeded;
860 * }
861 * We only need NumPrimitivesWritten here. */
862 result->u64 += r600_query_read_result(buffer, 2, 6, true);
863 break;
864 case PIPE_QUERY_PRIMITIVES_GENERATED:
865 /* Here we read PrimitiveStorageNeeded. */
866 result->u64 += r600_query_read_result(buffer, 0, 4, true);
867 break;
868 case PIPE_QUERY_SO_STATISTICS:
869 result->so_statistics.num_primitives_written +=
870 r600_query_read_result(buffer, 2, 6, true);
871 result->so_statistics.primitives_storage_needed +=
872 r600_query_read_result(buffer, 0, 4, true);
873 break;
874 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
875 result->b = result->b ||
876 r600_query_read_result(buffer, 2, 6, true) !=
877 r600_query_read_result(buffer, 0, 4, true);
878 break;
879 case PIPE_QUERY_PIPELINE_STATISTICS:
880 if (ctx->chip_class >= EVERGREEN) {
881 result->pipeline_statistics.ps_invocations +=
882 r600_query_read_result(buffer, 0, 22, false);
883 result->pipeline_statistics.c_primitives +=
884 r600_query_read_result(buffer, 2, 24, false);
885 result->pipeline_statistics.c_invocations +=
886 r600_query_read_result(buffer, 4, 26, false);
887 result->pipeline_statistics.vs_invocations +=
888 r600_query_read_result(buffer, 6, 28, false);
889 result->pipeline_statistics.gs_invocations +=
890 r600_query_read_result(buffer, 8, 30, false);
891 result->pipeline_statistics.gs_primitives +=
892 r600_query_read_result(buffer, 10, 32, false);
893 result->pipeline_statistics.ia_primitives +=
894 r600_query_read_result(buffer, 12, 34, false);
895 result->pipeline_statistics.ia_vertices +=
896 r600_query_read_result(buffer, 14, 36, false);
897 result->pipeline_statistics.hs_invocations +=
898 r600_query_read_result(buffer, 16, 38, false);
899 result->pipeline_statistics.ds_invocations +=
900 r600_query_read_result(buffer, 18, 40, false);
901 result->pipeline_statistics.cs_invocations +=
902 r600_query_read_result(buffer, 20, 42, false);
903 } else {
904 result->pipeline_statistics.ps_invocations +=
905 r600_query_read_result(buffer, 0, 16, false);
906 result->pipeline_statistics.c_primitives +=
907 r600_query_read_result(buffer, 2, 18, false);
908 result->pipeline_statistics.c_invocations +=
909 r600_query_read_result(buffer, 4, 20, false);
910 result->pipeline_statistics.vs_invocations +=
911 r600_query_read_result(buffer, 6, 22, false);
912 result->pipeline_statistics.gs_invocations +=
913 r600_query_read_result(buffer, 8, 24, false);
914 result->pipeline_statistics.gs_primitives +=
915 r600_query_read_result(buffer, 10, 26, false);
916 result->pipeline_statistics.ia_primitives +=
917 r600_query_read_result(buffer, 12, 28, false);
918 result->pipeline_statistics.ia_vertices +=
919 r600_query_read_result(buffer, 14, 30, false);
920 }
921 #if 0 /* for testing */
922 printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
923 "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
924 "Clipper prims=%llu, PS=%llu, CS=%llu\n",
925 result->pipeline_statistics.ia_vertices,
926 result->pipeline_statistics.ia_primitives,
927 result->pipeline_statistics.vs_invocations,
928 result->pipeline_statistics.hs_invocations,
929 result->pipeline_statistics.ds_invocations,
930 result->pipeline_statistics.gs_invocations,
931 result->pipeline_statistics.gs_primitives,
932 result->pipeline_statistics.c_invocations,
933 result->pipeline_statistics.c_primitives,
934 result->pipeline_statistics.ps_invocations,
935 result->pipeline_statistics.cs_invocations);
936 #endif
937 break;
938 default:
939 assert(0);
940 }
941 }
942
943 static boolean r600_get_query_result(struct pipe_context *ctx,
944 struct pipe_query *query, boolean wait,
945 union pipe_query_result *result)
946 {
947 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
948 struct r600_query *rquery = (struct r600_query *)query;
949
950 return rquery->ops->get_result(rctx, rquery, wait, result);
951 }
952
953 static void r600_query_hw_clear_result(struct r600_query_hw *query,
954 union pipe_query_result *result)
955 {
956 util_query_clear_result(result, query->b.type);
957 }
958
959 boolean r600_query_hw_get_result(struct r600_common_context *rctx,
960 struct r600_query *rquery,
961 boolean wait, union pipe_query_result *result)
962 {
963 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
964 struct r600_query_buffer *qbuf;
965
966 query->ops->clear_result(query, result);
967
968 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
969 unsigned results_base = 0;
970 void *map;
971
972 map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
973 PIPE_TRANSFER_READ |
974 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
975 if (!map)
976 return FALSE;
977
978 while (results_base != qbuf->results_end) {
979 query->ops->add_result(rctx, query, map + results_base,
980 result);
981 results_base += query->result_size;
982 }
983 }
984
985 /* Convert the time to expected units. */
986 if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
987 rquery->type == PIPE_QUERY_TIMESTAMP) {
988 result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
989 }
990 return TRUE;
991 }
992
993 static void r600_render_condition(struct pipe_context *ctx,
994 struct pipe_query *query,
995 boolean condition,
996 uint mode)
997 {
998 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
999 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
1000 struct r600_query_buffer *qbuf;
1001 struct r600_atom *atom = &rctx->render_cond_atom;
1002
1003 rctx->render_cond = query;
1004 rctx->render_cond_invert = condition;
1005 rctx->render_cond_mode = mode;
1006
1007 /* Compute the size of SET_PREDICATION packets. */
1008 atom->num_dw = 0;
1009 if (query) {
1010 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
1011 atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
1012 }
1013
1014 rctx->set_atom_dirty(rctx, atom, query != NULL);
1015 }
1016
1017 void r600_suspend_queries(struct r600_common_context *ctx)
1018 {
1019 struct r600_query_hw *query;
1020
1021 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1022 r600_query_hw_emit_stop(ctx, query);
1023 }
1024 assert(ctx->num_cs_dw_queries_suspend == 0);
1025 }
1026
1027 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
1028 struct list_head *query_list)
1029 {
1030 struct r600_query_hw *query;
1031 unsigned num_dw = 0;
1032
1033 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1034 /* begin + end */
1035 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1036
1037 /* Workaround for the fact that
1038 * num_cs_dw_queries_suspend is incremented for every
1039 * resumed query, which raises the bar in need_cs_space for
1040 * queries about to be resumed.
1041 */
1042 num_dw += query->num_cs_dw_end;
1043 }
1044 /* primitives generated query */
1045 num_dw += ctx->streamout.enable_atom.num_dw;
1046 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
1047 num_dw += 13;
1048
1049 return num_dw;
1050 }
1051
1052 void r600_resume_queries(struct r600_common_context *ctx)
1053 {
1054 struct r600_query_hw *query;
1055 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
1056
1057 assert(ctx->num_cs_dw_queries_suspend == 0);
1058
1059 /* Check CS space here. Resuming must not be interrupted by flushes. */
1060 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);
1061
1062 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1063 r600_query_hw_emit_start(ctx, query);
1064 }
1065 }
1066
1067 /* Get the mask of enabled render backends. */
1068 void r600_query_init_backend_mask(struct r600_common_context *ctx)
1069 {
1070 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1071 struct r600_resource *buffer;
1072 uint32_t *results;
1073 unsigned num_backends = ctx->screen->info.num_render_backends;
1074 unsigned i, mask = 0;
1075
1076 /* if backend_map query is supported by the kernel */
1077 if (ctx->screen->info.r600_gb_backend_map_valid) {
1078 unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
1079 unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
1080 unsigned item_width, item_mask;
1081
1082 if (ctx->chip_class >= EVERGREEN) {
1083 item_width = 4;
1084 item_mask = 0x7;
1085 } else {
1086 item_width = 2;
1087 item_mask = 0x3;
1088 }
1089
1090 while (num_tile_pipes--) {
1091 i = backend_map & item_mask;
1092 mask |= (1<<i);
1093 backend_map >>= item_width;
1094 }
1095 if (mask != 0) {
1096 ctx->backend_mask = mask;
1097 return;
1098 }
1099 }
1100
1101 /* Otherwise, use the fallback path for older kernels. */
1102
1103 /* create buffer for event data */
1104 buffer = (struct r600_resource*)
1105 pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
1106 PIPE_USAGE_STAGING, ctx->max_db*16);
1107 if (!buffer)
1108 goto err;
1109
1110 /* initialize buffer with zeroes */
1111 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
1112 if (results) {
1113 memset(results, 0, ctx->max_db * 4 * 4);
1114
1115 /* emit EVENT_WRITE for ZPASS_DONE */
1116 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1117 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
1118 radeon_emit(cs, buffer->gpu_address);
1119 radeon_emit(cs, buffer->gpu_address >> 32);
1120
1121 r600_emit_reloc(ctx, &ctx->gfx, buffer,
1122 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
1123
1124 /* analyze results */
1125 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
1126 if (results) {
1127 for(i = 0; i < ctx->max_db; i++) {
1128 /* at least the highest bit will be set if the backend is used */
1129 if (results[i*4 + 1])
1130 mask |= (1<<i);
1131 }
1132 }
1133 }
1134
1135 pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
1136
1137 if (mask != 0) {
1138 ctx->backend_mask = mask;
1139 return;
1140 }
1141
1142 err:
1143 /* Fall back to the old method: set the lowest num_backends bits to 1. */
1144 ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
1145 return;
1146 }
1147
1148 #define XFULL(name_, query_type_, type_, result_type_, group_id_) \
1149 { \
1150 .name = name_, \
1151 .query_type = R600_QUERY_##query_type_, \
1152 .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
1153 .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
1154 .group_id = group_id_ \
1155 }
1156
1157 #define X(name_, query_type_, type_, result_type_) \
1158 XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
1159
1160 #define XG(group_, name_, query_type_, type_, result_type_) \
1161 XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
1162
1163 static struct pipe_driver_query_info r600_driver_query_list[] = {
1164 X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
1165 X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
1166 X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
1167 X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
1168 X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
1169 X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
1170 X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
1171 X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
1172 X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
1173 X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
1174 X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
1175 X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
1176 X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
1177 X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
1178
1179 /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
1180 * which use them as a fallback path to detect the GPU type.
1181 *
1182 * Note: The names of these queries are significant for GPUPerfStudio
1183 * (and possibly their order as well). */
1184 XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
1185 XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
1186 XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
1187 XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
1188 XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),
1189
1190 /* The following queries must be at the end of the list because their
1191 * availability is adjusted dynamically based on the DRM version. */
1192 X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
1193 X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
1194 X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
1195 X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
1196 };
1197
1198 #undef X
1199 #undef XG
1200 #undef XFULL
1201
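/* The last entries of r600_driver_query_list depend on kernel features; hide
 * the ones this DRM version cannot service.
 */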
1202 static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
1203 {
1204 if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
1205 return ARRAY_SIZE(r600_driver_query_list);
1206 else if (rscreen->info.drm_major == 3)
1207 return ARRAY_SIZE(r600_driver_query_list) - 3;
1208 else
1209 return ARRAY_SIZE(r600_driver_query_list) - 4;
1210 }
1211
1212 static int r600_get_driver_query_info(struct pipe_screen *screen,
1213 unsigned index,
1214 struct pipe_driver_query_info *info)
1215 {
1216 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
1217 unsigned num_queries = r600_get_num_queries(rscreen);
1218
1219 if (!info) {
1220 unsigned num_perfcounters =
1221 r600_get_perfcounter_info(rscreen, 0, NULL);
1222
1223 return num_queries + num_perfcounters;
1224 }
1225
1226 if (index >= num_queries)
1227 return r600_get_perfcounter_info(rscreen, index - num_queries, info);
1228
1229 *info = r600_driver_query_list[index];
1230
1231 switch (info->query_type) {
1232 case R600_QUERY_REQUESTED_VRAM:
1233 case R600_QUERY_VRAM_USAGE:
1234 info->max_value.u64 = rscreen->info.vram_size;
1235 break;
1236 case R600_QUERY_REQUESTED_GTT:
1237 case R600_QUERY_GTT_USAGE:
1238 info->max_value.u64 = rscreen->info.gart_size;
1239 break;
1240 case R600_QUERY_GPU_TEMPERATURE:
1241 info->max_value.u64 = 125;
1242 break;
1243 }
1244
1245 if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
1246 info->group_id += rscreen->perfcounters->num_groups;
1247
1248 return 1;
1249 }
1250
1251 /* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
1252 * performance counter groups, so be careful when changing this and related
1253 * functions.
1254 */
1255 static int r600_get_driver_query_group_info(struct pipe_screen *screen,
1256 unsigned index,
1257 struct pipe_driver_query_group_info *info)
1258 {
1259 struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
1260 unsigned num_pc_groups = 0;
1261
1262 if (rscreen->perfcounters)
1263 num_pc_groups = rscreen->perfcounters->num_groups;
1264
1265 if (!info)
1266 return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
1267
1268 if (index < num_pc_groups)
1269 return r600_get_perfcounter_group_info(rscreen, index, info);
1270
1271 index -= num_pc_groups;
1272 if (index >= R600_NUM_SW_QUERY_GROUPS)
1273 return 0;
1274
1275 info->name = "GPIN";
1276 info->max_active_queries = 5;
1277 info->num_queries = 5;
1278 return 1;
1279 }
1280
1281 void r600_query_init(struct r600_common_context *rctx)
1282 {
1283 rctx->b.create_query = r600_create_query;
1284 rctx->b.create_batch_query = r600_create_batch_query;
1285 rctx->b.destroy_query = r600_destroy_query;
1286 rctx->b.begin_query = r600_begin_query;
1287 rctx->b.end_query = r600_end_query;
1288 rctx->b.get_query_result = r600_get_query_result;
1289 rctx->render_cond_atom.emit = r600_emit_query_predication;
1290
1291 if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
1292 rctx->b.render_condition = r600_render_condition;
1293
1294 LIST_INITHEAD(&rctx->active_queries);
1295 }
1296
1297 void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
1298 {
1299 rscreen->b.get_driver_query_info = r600_get_driver_query_info;
1300 rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
1301 }