Treewide: Remove Elements() macro
src/gallium/drivers/radeon/r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	screen->fence_reference(screen, &query->fence, NULL);
	FREE(query);
}

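/* Map a driver-specific query type to the winsys counter it samples;
 * these queries are answered entirely by the winsys/kernel. */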
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	default: unreachable("query type does not correspond to winsys id");
	}
}

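/* Software queries sample a counter at begin and at end; the reported result
 * is end_result - begin_result. Instantaneous counters (VRAM/GTT usage,
 * temperature, clocks) only sample at end, so begin is zeroed. */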
static boolean r600_query_sw_begin(struct r600_common_context *rctx,
				   struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->begin_result = r600_gpu_load_begin(rctx->screen);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return TRUE;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, 0);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->end_result = r600_gpu_load_end(rctx->screen,
						      query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

static boolean r600_query_sw_get_result(struct r600_common_context *rctx,
					struct r600_query *rquery,
					boolean wait,
					union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return TRUE;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return TRUE;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return TRUE;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return TRUE;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return TRUE;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return TRUE;
}

static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result
};

static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
					       unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_context *rctx,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
	FREE(rquery);
}

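/* Allocate a buffer for hardware query results. Results are appended at
 * buffer.results_end; when a buffer fills up, a new one is allocated and the
 * full one is kept on the buffer.previous chain until the result is read. */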
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 ctx->screen->info.gart_page_size);

	/* Queries are normally read by the CPU after being written by the
	 * GPU, hence staging is probably a good usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
		if (!query->ops->prepare_buffer(ctx, query, buf)) {
			pipe_resource_reference((struct pipe_resource **)&buf, NULL);
			return NULL;
		}
	}

	return buf;
}

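/* Zero the buffer and, for occlusion queries, mark the result slots of
 * disabled render backends as already written: setting the top (ready) bits
 * keeps the readiness test in r600_query_read_result from waiting forever
 * for backends that will never write results. */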
static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
						PIPE_TRANSFER_WRITE |
						PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
	}

	return true;
}

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

boolean r600_query_hw_init(struct r600_common_context *rctx,
			   struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rctx, query);
	if (!query->buffer.buf)
		return FALSE;

	return TRUE;
}

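/* result_size is the number of bytes the GPU writes for one begin/end pair;
 * num_cs_dw_begin/end reserve command stream space for the begin and end
 * packets, so that emitting them can never be split by a flush. */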
static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw_end = 8;
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rctx, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

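/* Write the begin-of-query sample at va. ZPASS_DONE makes each DB write a
 * pair of 64-bit counters; SAMPLE_STREAMOUTSTATS* sample the streamout
 * counters for the query's stream. TIME_ELAPSED uses EVENT_WRITE_EOP, where
 * the (3 << 29) below is the data-select field requesting a 64-bit
 * bottom-of-pipe timestamp. */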
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

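/* Write the end-of-query sample. End results land in the second half of each
 * begin/end pair: +8 within each 16-byte per-DB slot for occlusion queries,
 * +result_size/2 for the others. */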
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* Queries that have a begin emission already reserved CS space for
	 * the end packets in r600_query_hw_emit_start. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, FALSE);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

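/* pipe_context query entry points. Query types with no GPU backing
 * (TIMESTAMP_DISJOINT, GPU_FINISHED and the driver-specific counters) take
 * the software path; everything else becomes a hardware query. */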
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(ctx, query_type);

	return r600_query_hw_create(rctx, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

static void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
					struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
		/* Obtain a new buffer if the current one can't be mapped without a stall. */
		if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
			pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
			query->buffer.buf = r600_new_query_buffer(rctx, query);
		} else {
			if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
				pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
		}
	}
}

boolean r600_query_hw_begin(struct r600_common_context *rctx,
			    struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

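/* Read one 64-bit begin/end counter pair from a mapped result buffer and
 * return the difference. For counters written by ZPASS_DONE and
 * SAMPLE_STREAMOUTSTATS, the top bit of each 64-bit value serves as a
 * "result written" flag; if either half isn't ready yet, contribute 0. */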
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		unsigned results_base = 0;
		while (results_base != query->result_size) {
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		unsigned results_base = 0;
		while (results_base != query->result_size) {
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)buffer;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

boolean r600_query_hw_get_result(struct r600_common_context *rctx,
				 struct r600_query *rquery,
				 boolean wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return FALSE;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
	}
	return TRUE;
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

/* Compute the mask of enabled render backends (DBs), which is needed to
 * interpret the per-backend results of ZPASS_DONE. */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.num_render_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise fall back to the path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

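/* Shorthand for pipe_driver_query_info entries: X() declares an ungrouped
 * query, XG() a query in a named group. Group ids are remapped to follow the
 * perf counter groups in r600_get_driver_query_info. */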
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, CUMULATIVE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, CUMULATIVE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

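/* The tail of the list is gated on kernel support: amdgpu (DRM 3.x) exposes
 * GPU-load but not the temperature/clock queries, while radeon needs
 * DRM 2.42+ for all four. */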
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return ARRAY_SIZE(r600_driver_query_list) - 3;
	else
		return ARRAY_SIZE(r600_driver_query_list) - 4;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}