[mesa.git] src/gallium/drivers/radeon/r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	screen->fence_reference(screen, &query->fence, NULL);
	FREE(query);
}

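/* Map a driver-specific query type to the winsys value ID that
 * ws->query_value() expects. */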
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	default: unreachable("query type does not correspond to winsys id");
	}
}

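/* Software queries: snapshot the current value of the counter at
 * begin_query, so that get_result can later return end - begin for
 * counters that the winsys or the driver keeps as running totals. */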
static boolean r600_query_sw_begin(struct r600_common_context *rctx,
				   struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->begin_result = r600_gpu_load_begin(rctx->screen);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return TRUE;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, 0);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->end_result = r600_gpu_load_end(rctx->screen,
						      query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

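/* Return the result of a software query; most types report
 * end_result - begin_result, with a few unit conversions below. */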
static boolean r600_query_sw_get_result(struct r600_common_context *rctx,
					struct r600_query *rquery,
					boolean wait,
					union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return TRUE;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return TRUE;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return TRUE;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return TRUE;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return TRUE;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return TRUE;
}

static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result
};

static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
					       unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_context *rctx,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
	FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
						    struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size, 4096);

	/* Queries are normally read by the CPU after
	 * being written by the gpu, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
		if (!query->ops->prepare_buffer(ctx, query, buf)) {
			pipe_resource_reference((struct pipe_resource **)&buf, NULL);
			return NULL;
		}
	}

	return buf;
}

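/* Initialize a query result buffer: zero it and, for occlusion queries,
 * pre-set the "result written" bits of render backends that are not
 * enabled, so those slots read back as completed zero counts. */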
static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
						PIPE_TRANSFER_WRITE |
						PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
	}

	return true;
}

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

boolean r600_query_hw_init(struct r600_common_context *rctx,
			   struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rctx, query);
	if (!query->buffer.buf)
		return FALSE;

	return TRUE;
}

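/* Create a hardware query and set its result size, CS space requirements
 * and flags according to the query type. */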
static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw_end = 8;
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rctx, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

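/* Emit the "begin" part of a hardware query: reserve CS space, grab a new
 * result buffer if the current one is full, and write the start event at
 * the current results_end offset. */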
static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

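/* Emit the "end" part of a hardware query and advance results_end past
 * the result pair that was just written. */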
static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, FALSE);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

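/* Emit SET_PREDICATION packets for conditional rendering: one packet per
 * stored result in every buffer of the render-condition query. */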
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(ctx, query_type);

	return r600_query_hw_create(rctx, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

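/* Throw away any previously filled query buffers and start writing
 * results at the beginning of the current buffer again. */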
static void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
					struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
		/* Obtain a new buffer if the current one can't be mapped without a stall. */
		if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
			pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
			query->buffer.buf = r600_new_query_buffer(rctx, query);
		} else {
			if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
				pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
		}
	}
}

boolean r600_query_hw_begin(struct r600_common_context *rctx,
			    struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

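/* Read a 64-bit begin/end pair from a mapped result buffer and return the
 * difference; if test_status_bit is set, the pair is only counted when
 * both values have their "result written" bit (bit 63) set. */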
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		unsigned results_base = 0;
		while (results_base != query->result_size) {
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		unsigned results_base = 0;
		while (results_base != query->result_size) {
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)buffer;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

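/* Accumulate the result of a hardware query over all of its buffers:
 * map each buffer (waiting for the GPU if requested) and let the
 * per-type add_result hook sum up the stored result pairs. */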
boolean r600_query_hw_get_result(struct r600_common_context *rctx,
				 struct r600_query *rquery,
				 boolean wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return FALSE;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
	}
	return TRUE;
}

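/* Bind or unbind the query used for conditional rendering and compute
 * how many CS dwords the SET_PREDICATION packets will need. */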
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.num_render_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for(i = 0; i < ctx->max_db; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, CUMULATIVE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, CUMULATIVE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use it as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return Elements(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return Elements(r600_driver_query_list) - 3;
	else
		return Elements(r600_driver_query_list) - 4;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}