gallium: add pipe_context::set_active_query_state for pausing queries
[mesa.git] / src / gallium / drivers / radeon / r600_query.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "r600_query.h"
26 #include "r600_cs.h"
27 #include "util/u_memory.h"
28
29 /* Queries without buffer handling or suspend/resume. */
30 struct r600_query_sw {
31 struct r600_query b;
32
33 uint64_t begin_result;
34 uint64_t end_result;
35 /* Fence for GPU_FINISHED. */
36 struct pipe_fence_handle *fence;
37 };
38
39 static void r600_query_sw_destroy(struct r600_common_context *rctx,
40 struct r600_query *rquery)
41 {
42 struct pipe_screen *screen = rctx->b.screen;
43 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
44
45 screen->fence_reference(screen, &query->fence, NULL);
46 FREE(query);
47 }
48
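/* Map a driver-specific query type to the winsys value id it reads. */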
49 static enum radeon_value_id winsys_id_from_type(unsigned type)
50 {
51 switch (type) {
52 case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
53 case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
54 case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
55 case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
56 case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
57 case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
58 case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
59 case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
60 case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
61 case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
62 default: unreachable("query type does not correspond to winsys id");
63 }
64 }
65
66 static boolean r600_query_sw_begin(struct r600_common_context *rctx,
67 struct r600_query *rquery)
68 {
69 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
70
71 switch(query->b.type) {
72 case PIPE_QUERY_TIMESTAMP_DISJOINT:
73 case PIPE_QUERY_GPU_FINISHED:
74 break;
75 case R600_QUERY_DRAW_CALLS:
76 query->begin_result = rctx->num_draw_calls;
77 break;
78 case R600_QUERY_REQUESTED_VRAM:
79 case R600_QUERY_REQUESTED_GTT:
80 case R600_QUERY_VRAM_USAGE:
81 case R600_QUERY_GTT_USAGE:
82 case R600_QUERY_GPU_TEMPERATURE:
83 case R600_QUERY_CURRENT_GPU_SCLK:
84 case R600_QUERY_CURRENT_GPU_MCLK:
85 query->begin_result = 0;
86 break;
87 case R600_QUERY_BUFFER_WAIT_TIME:
88 case R600_QUERY_NUM_CS_FLUSHES:
89 case R600_QUERY_NUM_BYTES_MOVED: {
90 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
91 query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
92 break;
93 }
94 case R600_QUERY_GPU_LOAD:
95 query->begin_result = r600_gpu_load_begin(rctx->screen);
96 break;
97 case R600_QUERY_NUM_COMPILATIONS:
98 query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
99 break;
100 case R600_QUERY_NUM_SHADERS_CREATED:
101 query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
102 break;
103 case R600_QUERY_GPIN_ASIC_ID:
104 case R600_QUERY_GPIN_NUM_SIMD:
105 case R600_QUERY_GPIN_NUM_RB:
106 case R600_QUERY_GPIN_NUM_SPI:
107 case R600_QUERY_GPIN_NUM_SE:
108 break;
109 default:
110 unreachable("r600_query_sw_begin: bad query type");
111 }
112
113 return TRUE;
114 }
115
116 static void r600_query_sw_end(struct r600_common_context *rctx,
117 struct r600_query *rquery)
118 {
119 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
120
121 switch(query->b.type) {
122 case PIPE_QUERY_TIMESTAMP_DISJOINT:
123 break;
124 case PIPE_QUERY_GPU_FINISHED:
125 rctx->b.flush(&rctx->b, &query->fence, 0);
126 break;
127 case R600_QUERY_DRAW_CALLS:
128 query->end_result = rctx->num_draw_calls;
129 break;
130 case R600_QUERY_REQUESTED_VRAM:
131 case R600_QUERY_REQUESTED_GTT:
132 case R600_QUERY_VRAM_USAGE:
133 case R600_QUERY_GTT_USAGE:
134 case R600_QUERY_GPU_TEMPERATURE:
135 case R600_QUERY_CURRENT_GPU_SCLK:
136 case R600_QUERY_CURRENT_GPU_MCLK:
137 case R600_QUERY_BUFFER_WAIT_TIME:
138 case R600_QUERY_NUM_CS_FLUSHES:
139 case R600_QUERY_NUM_BYTES_MOVED: {
140 enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
141 query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
142 break;
143 }
144 case R600_QUERY_GPU_LOAD:
145 query->end_result = r600_gpu_load_end(rctx->screen,
146 query->begin_result);
147 query->begin_result = 0;
148 break;
149 case R600_QUERY_NUM_COMPILATIONS:
150 query->end_result = p_atomic_read(&rctx->screen->num_compilations);
151 break;
152 case R600_QUERY_NUM_SHADERS_CREATED:
153 query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
154 break;
155 case R600_QUERY_GPIN_ASIC_ID:
156 case R600_QUERY_GPIN_NUM_SIMD:
157 case R600_QUERY_GPIN_NUM_RB:
158 case R600_QUERY_GPIN_NUM_SPI:
159 case R600_QUERY_GPIN_NUM_SE:
160 break;
161 default:
162 unreachable("r600_query_sw_end: bad query type");
163 }
164 }
165
166 static boolean r600_query_sw_get_result(struct r600_common_context *rctx,
167 struct r600_query *rquery,
168 boolean wait,
169 union pipe_query_result *result)
170 {
171 struct r600_query_sw *query = (struct r600_query_sw *)rquery;
172
173 switch (query->b.type) {
174 case PIPE_QUERY_TIMESTAMP_DISJOINT:
175 /* Convert from cycles per millisecond to cycles per second (Hz). */
176 result->timestamp_disjoint.frequency =
177 (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
178 result->timestamp_disjoint.disjoint = FALSE;
179 return TRUE;
180 case PIPE_QUERY_GPU_FINISHED: {
181 struct pipe_screen *screen = rctx->b.screen;
182 result->b = screen->fence_finish(screen, query->fence,
183 wait ? PIPE_TIMEOUT_INFINITE : 0);
184 return result->b;
185 }
186
187 case R600_QUERY_GPIN_ASIC_ID:
188 result->u32 = 0;
189 return TRUE;
190 case R600_QUERY_GPIN_NUM_SIMD:
191 result->u32 = rctx->screen->info.num_good_compute_units;
192 return TRUE;
193 case R600_QUERY_GPIN_NUM_RB:
194 result->u32 = rctx->screen->info.num_render_backends;
195 return TRUE;
196 case R600_QUERY_GPIN_NUM_SPI:
197 result->u32 = 1; /* all supported chips have one SPI per SE */
198 return TRUE;
199 case R600_QUERY_GPIN_NUM_SE:
200 result->u32 = rctx->screen->info.max_se;
201 return TRUE;
202 }
203
204 result->u64 = query->end_result - query->begin_result;
205
206 switch (query->b.type) {
207 case R600_QUERY_BUFFER_WAIT_TIME:
208 case R600_QUERY_GPU_TEMPERATURE:
209 result->u64 /= 1000;
210 break;
211 case R600_QUERY_CURRENT_GPU_SCLK:
212 case R600_QUERY_CURRENT_GPU_MCLK:
213 result->u64 *= 1000000;
214 break;
215 }
216
217 return TRUE;
218 }
219
220 static struct r600_query_ops sw_query_ops = {
221 .destroy = r600_query_sw_destroy,
222 .begin = r600_query_sw_begin,
223 .end = r600_query_sw_end,
224 .get_result = r600_query_sw_get_result
225 };
226
227 static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
228 unsigned query_type)
229 {
230 struct r600_query_sw *query;
231
232 query = CALLOC_STRUCT(r600_query_sw);
233 if (!query)
234 return NULL;
235
236 query->b.type = query_type;
237 query->b.ops = &sw_query_ops;
238
239 return (struct pipe_query *)query;
240 }
241
242 void r600_query_hw_destroy(struct r600_common_context *rctx,
243 struct r600_query *rquery)
244 {
245 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
246 struct r600_query_buffer *prev = query->buffer.previous;
247
248 /* Release all query buffers. */
249 while (prev) {
250 struct r600_query_buffer *qbuf = prev;
251 prev = prev->previous;
252 pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
253 FREE(qbuf);
254 }
255
256 pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
257 FREE(rquery);
258 }
259
260 static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
261 struct r600_query_hw *query)
262 {
263 unsigned buf_size = MAX2(query->result_size, 4096);
264
265 /* Queries are normally read by the CPU after
266 * being written by the GPU, hence staging is probably a good
267 * usage pattern.
268 */
269 struct r600_resource *buf = (struct r600_resource*)
270 pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
271 PIPE_USAGE_STAGING, buf_size);
272
273 if (query->flags & R600_QUERY_HW_FLAG_PREDICATE)
274 query->ops->prepare_buffer(ctx, query, buf);
275
276 return buf;
277 }
278
279 static void r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
280 struct r600_query_hw *query,
281 struct r600_resource *buffer)
282 {
283 /* Callers ensure that the buffer is currently unused by the GPU. */
284 uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
285 PIPE_TRANSFER_WRITE |
286 PIPE_TRANSFER_UNSYNCHRONIZED);
287
288 memset(results, 0, buffer->b.b.width0);
289
290 if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
291 query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
292 unsigned num_results;
293 unsigned i, j;
294
295 /* Set top bits for unused backends. */
296 num_results = buffer->b.b.width0 / (16 * ctx->max_db);
297 for (j = 0; j < num_results; j++) {
298 for (i = 0; i < ctx->max_db; i++) {
299 if (!(ctx->backend_mask & (1<<i))) {
300 results[(i * 4)+1] = 0x80000000;
301 results[(i * 4)+3] = 0x80000000;
302 }
303 }
304 results += 4 * ctx->max_db;
305 }
306 }
307 }
308
309 static struct r600_query_ops query_hw_ops = {
310 .destroy = r600_query_hw_destroy,
311 .begin = r600_query_hw_begin,
312 .end = r600_query_hw_end,
313 .get_result = r600_query_hw_get_result,
314 };
315
316 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
317 struct r600_query_hw *query,
318 struct r600_resource *buffer,
319 uint64_t va);
320 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
321 struct r600_query_hw *query,
322 struct r600_resource *buffer,
323 uint64_t va);
324 static void r600_query_hw_add_result(struct r600_common_context *ctx,
325 struct r600_query_hw *, void *buffer,
326 union pipe_query_result *result);
327 static void r600_query_hw_clear_result(struct r600_query_hw *,
328 union pipe_query_result *);
329
330 static struct r600_query_hw_ops query_hw_default_hw_ops = {
331 .prepare_buffer = r600_query_hw_prepare_buffer,
332 .emit_start = r600_query_hw_do_emit_start,
333 .emit_stop = r600_query_hw_do_emit_stop,
334 .clear_result = r600_query_hw_clear_result,
335 .add_result = r600_query_hw_add_result,
336 };
337
338 boolean r600_query_hw_init(struct r600_common_context *rctx,
339 struct r600_query_hw *query)
340 {
341 query->buffer.buf = r600_new_query_buffer(rctx, query);
342 if (!query->buffer.buf)
343 return FALSE;
344
345 return TRUE;
346 }
347
348 static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
349 unsigned query_type,
350 unsigned index)
351 {
352 struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
353 if (!query)
354 return NULL;
355
356 query->b.type = query_type;
357 query->b.ops = &query_hw_ops;
358 query->ops = &query_hw_default_hw_ops;
359
360 switch (query_type) {
361 case PIPE_QUERY_OCCLUSION_COUNTER:
362 case PIPE_QUERY_OCCLUSION_PREDICATE:
363 query->result_size = 16 * rctx->max_db;
364 query->num_cs_dw_begin = 6;
365 query->num_cs_dw_end = 6;
366 query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
367 break;
368 case PIPE_QUERY_TIME_ELAPSED:
369 query->result_size = 16;
370 query->num_cs_dw_begin = 8;
371 query->num_cs_dw_end = 8;
372 query->flags = R600_QUERY_HW_FLAG_TIMER;
373 break;
374 case PIPE_QUERY_TIMESTAMP:
375 query->result_size = 8;
376 query->num_cs_dw_end = 8;
377 query->flags = R600_QUERY_HW_FLAG_TIMER |
378 R600_QUERY_HW_FLAG_NO_START;
379 break;
380 case PIPE_QUERY_PRIMITIVES_EMITTED:
381 case PIPE_QUERY_PRIMITIVES_GENERATED:
382 case PIPE_QUERY_SO_STATISTICS:
383 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
384 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
385 query->result_size = 32;
386 query->num_cs_dw_begin = 6;
387 query->num_cs_dw_end = 6;
388 query->stream = index;
389 query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
390 break;
391 case PIPE_QUERY_PIPELINE_STATISTICS:
392 /* 11 values on EG, 8 on R600. */
393 query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
394 query->num_cs_dw_begin = 6;
395 query->num_cs_dw_end = 6;
396 break;
397 default:
398 assert(0);
399 FREE(query);
400 return NULL;
401 }
402
403 if (!r600_query_hw_init(rctx, query)) {
404 FREE(query);
405 return NULL;
406 }
407
408 return (struct pipe_query *)query;
409 }
410
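/* Keep a count of active (perfect) occlusion queries and update the
 * occlusion query state when the enable status changes. */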
411 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
412 unsigned type, int diff)
413 {
414 if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
415 type == PIPE_QUERY_OCCLUSION_PREDICATE) {
416 bool old_enable = rctx->num_occlusion_queries != 0;
417 bool old_perfect_enable =
418 rctx->num_perfect_occlusion_queries != 0;
419 bool enable, perfect_enable;
420
421 rctx->num_occlusion_queries += diff;
422 assert(rctx->num_occlusion_queries >= 0);
423
424 if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
425 rctx->num_perfect_occlusion_queries += diff;
426 assert(rctx->num_perfect_occlusion_queries >= 0);
427 }
428
429 enable = rctx->num_occlusion_queries != 0;
430 perfect_enable = rctx->num_perfect_occlusion_queries != 0;
431
432 if (enable != old_enable || perfect_enable != old_perfect_enable) {
433 rctx->set_occlusion_query_state(&rctx->b, enable);
434 }
435 }
436 }
437
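/* Select the SAMPLE_STREAMOUTSTATS* event for the query's streamout stream. */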
438 static unsigned event_type_for_stream(struct r600_query_hw *query)
439 {
440 switch (query->stream) {
441 default:
442 case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
443 case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
444 case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
445 case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
446 }
447 }
448
449 static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
450 struct r600_query_hw *query,
451 struct r600_resource *buffer,
452 uint64_t va)
453 {
454 struct radeon_winsys_cs *cs = ctx->gfx.cs;
455
456 switch (query->b.type) {
457 case PIPE_QUERY_OCCLUSION_COUNTER:
458 case PIPE_QUERY_OCCLUSION_PREDICATE:
459 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
460 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
461 radeon_emit(cs, va);
462 radeon_emit(cs, (va >> 32) & 0xFFFF);
463 break;
464 case PIPE_QUERY_PRIMITIVES_EMITTED:
465 case PIPE_QUERY_PRIMITIVES_GENERATED:
466 case PIPE_QUERY_SO_STATISTICS:
467 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
468 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
469 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
470 radeon_emit(cs, va);
471 radeon_emit(cs, (va >> 32) & 0xFFFF);
472 break;
473 case PIPE_QUERY_TIME_ELAPSED:
474 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
475 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
476 radeon_emit(cs, va);
477 radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
478 radeon_emit(cs, 0);
479 radeon_emit(cs, 0);
480 break;
481 case PIPE_QUERY_PIPELINE_STATISTICS:
482 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
483 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
484 radeon_emit(cs, va);
485 radeon_emit(cs, (va >> 32) & 0xFFFF);
486 break;
487 default:
488 assert(0);
489 }
490 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
491 RADEON_PRIO_QUERY);
492 }
493
494 static void r600_query_hw_emit_start(struct r600_common_context *ctx,
495 struct r600_query_hw *query)
496 {
497 uint64_t va;
498
499 r600_update_occlusion_query_state(ctx, query->b.type, 1);
500 r600_update_prims_generated_query_state(ctx, query->b.type, 1);
501
502 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
503 TRUE);
504
505 /* Get a new query buffer if needed. */
506 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
507 struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
508 *qbuf = query->buffer;
509 query->buffer.buf = r600_new_query_buffer(ctx, query);
510 query->buffer.results_end = 0;
511 query->buffer.previous = qbuf;
512 }
513
514 /* emit begin query */
515 va = query->buffer.buf->gpu_address + query->buffer.results_end;
516
517 query->ops->emit_start(ctx, query, query->buffer.buf, va);
518
519 if (query->flags & R600_QUERY_HW_FLAG_TIMER)
520 ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw_end;
521 else
522 ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw_end;
523 }
524
525 static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
526 struct r600_query_hw *query,
527 struct r600_resource *buffer,
528 uint64_t va)
529 {
530 struct radeon_winsys_cs *cs = ctx->gfx.cs;
531
532 switch (query->b.type) {
533 case PIPE_QUERY_OCCLUSION_COUNTER:
534 case PIPE_QUERY_OCCLUSION_PREDICATE:
535 va += 8;
536 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
537 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
538 radeon_emit(cs, va);
539 radeon_emit(cs, (va >> 32) & 0xFFFF);
540 break;
541 case PIPE_QUERY_PRIMITIVES_EMITTED:
542 case PIPE_QUERY_PRIMITIVES_GENERATED:
543 case PIPE_QUERY_SO_STATISTICS:
544 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
545 va += query->result_size/2;
546 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
547 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
548 radeon_emit(cs, va);
549 radeon_emit(cs, (va >> 32) & 0xFFFF);
550 break;
551 case PIPE_QUERY_TIME_ELAPSED:
552 va += query->result_size/2;
553 /* fall through */
554 case PIPE_QUERY_TIMESTAMP:
555 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
556 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
557 radeon_emit(cs, va);
558 radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
559 radeon_emit(cs, 0);
560 radeon_emit(cs, 0);
561 break;
562 case PIPE_QUERY_PIPELINE_STATISTICS:
563 va += query->result_size/2;
564 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
565 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
566 radeon_emit(cs, va);
567 radeon_emit(cs, (va >> 32) & 0xFFFF);
568 break;
569 default:
570 assert(0);
571 }
572 r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
573 RADEON_PRIO_QUERY);
574 }
575
576 static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
577 struct r600_query_hw *query)
578 {
579 uint64_t va;
580
581 /* Queries that emit a begin already reserved CS space for this in begin_query. */
582 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
583 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, FALSE);
584 }
585
586 /* emit end query */
587 va = query->buffer.buf->gpu_address + query->buffer.results_end;
588
589 query->ops->emit_stop(ctx, query, query->buffer.buf, va);
590
591 query->buffer.results_end += query->result_size;
592
593 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START)) {
594 if (query->flags & R600_QUERY_HW_FLAG_TIMER)
595 ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw_end;
596 else
597 ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw_end;
598 }
599
600 r600_update_occlusion_query_state(ctx, query->b.type, -1);
601 r600_update_prims_generated_query_state(ctx, query->b.type, -1);
602 }
603
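/* Emit SET_PREDICATION packets covering every result slot written by the
 * current render-condition query. */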
604 static void r600_emit_query_predication(struct r600_common_context *ctx,
605 struct r600_atom *atom)
606 {
607 struct radeon_winsys_cs *cs = ctx->gfx.cs;
608 struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
609 struct r600_query_buffer *qbuf;
610 uint32_t op;
611 bool flag_wait;
612
613 if (!query)
614 return;
615
616 flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
617 ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
618
619 switch (query->b.type) {
620 case PIPE_QUERY_OCCLUSION_COUNTER:
621 case PIPE_QUERY_OCCLUSION_PREDICATE:
622 op = PRED_OP(PREDICATION_OP_ZPASS);
623 break;
624 case PIPE_QUERY_PRIMITIVES_EMITTED:
625 case PIPE_QUERY_PRIMITIVES_GENERATED:
626 case PIPE_QUERY_SO_STATISTICS:
627 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
628 op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
629 break;
630 default:
631 assert(0);
632 return;
633 }
634
635 /* if true then invert, see GL_ARB_conditional_render_inverted */
636 if (ctx->render_cond_invert)
637 op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
638 else
639 op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */
640
641 op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
642
643 /* emit predicate packets for all data blocks */
644 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
645 unsigned results_base = 0;
646 uint64_t va = qbuf->buf->gpu_address;
647
648 while (results_base < qbuf->results_end) {
649 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
650 radeon_emit(cs, va + results_base);
651 radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
652 r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
653 RADEON_PRIO_QUERY);
654 results_base += query->result_size;
655
656 /* set CONTINUE bit for all packets except the first */
657 op |= PREDICATION_CONTINUE;
658 }
659 }
660 }
661
662 static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
663 {
664 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
665
666 if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
667 query_type == PIPE_QUERY_GPU_FINISHED ||
668 query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
669 return r600_query_sw_create(ctx, query_type);
670
671 return r600_query_hw_create(rctx, query_type, index);
672 }
673
674 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
675 {
676 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
677 struct r600_query *rquery = (struct r600_query *)query;
678
679 rquery->ops->destroy(rctx, rquery);
680 }
681
682 static boolean r600_begin_query(struct pipe_context *ctx,
683 struct pipe_query *query)
684 {
685 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
686 struct r600_query *rquery = (struct r600_query *)query;
687
688 return rquery->ops->begin(rctx, rquery);
689 }
690
691 static void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
692 struct r600_query_hw *query)
693 {
694 struct r600_query_buffer *prev = query->buffer.previous;
695
696 /* Discard the old query buffers. */
697 while (prev) {
698 struct r600_query_buffer *qbuf = prev;
699 prev = prev->previous;
700 pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
701 FREE(qbuf);
702 }
703
704 if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
705 /* Obtain a new buffer if the current one can't be mapped without a stall. */
706 if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
707 !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
708 pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
709 query->buffer.buf = r600_new_query_buffer(rctx, query);
710 } else {
711 query->ops->prepare_buffer(rctx, query, query->buffer.buf);
712 }
713 }
714
715 query->buffer.results_end = 0;
716 query->buffer.previous = NULL;
717 }
718
719 boolean r600_query_hw_begin(struct r600_common_context *rctx,
720 struct r600_query *rquery)
721 {
722 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
723
724 if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
725 assert(0);
726 return false;
727 }
728
729 r600_query_hw_reset_buffers(rctx, query);
730
731 r600_query_hw_emit_start(rctx, query);
732
733 if (query->flags & R600_QUERY_HW_FLAG_TIMER)
734 LIST_ADDTAIL(&query->list, &rctx->active_timer_queries);
735 else
736 LIST_ADDTAIL(&query->list, &rctx->active_nontimer_queries);
737 return true;
738 }
739
740 static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
741 {
742 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
743 struct r600_query *rquery = (struct r600_query *)query;
744
745 rquery->ops->end(rctx, rquery);
746 }
747
748 void r600_query_hw_end(struct r600_common_context *rctx,
749 struct r600_query *rquery)
750 {
751 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
752
753 if (query->flags & R600_QUERY_HW_FLAG_NO_START)
754 r600_query_hw_reset_buffers(rctx, query);
755
756 r600_query_hw_emit_stop(rctx, query);
757
758 if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
759 LIST_DELINIT(&query->list);
760 }
761
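/* Return end - start from a pair of 64-bit counters in the query buffer.
 * If test_status_bit is set, the pair is only counted when the top bit of
 * both values is set, i.e. when the GPU has written valid results. */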
762 static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
763 bool test_status_bit)
764 {
765 uint32_t *current_result = (uint32_t*)map;
766 uint64_t start, end;
767
768 start = (uint64_t)current_result[start_index] |
769 (uint64_t)current_result[start_index+1] << 32;
770 end = (uint64_t)current_result[end_index] |
771 (uint64_t)current_result[end_index+1] << 32;
772
773 if (!test_status_bit ||
774 ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
775 return end - start;
776 }
777 return 0;
778 }
779
780 static void r600_query_hw_add_result(struct r600_common_context *ctx,
781 struct r600_query_hw *query,
782 void *buffer,
783 union pipe_query_result *result)
784 {
785 switch (query->b.type) {
786 case PIPE_QUERY_OCCLUSION_COUNTER: {
787 unsigned results_base = 0;
788 while (results_base != query->result_size) {
789 result->u64 +=
790 r600_query_read_result(buffer + results_base, 0, 2, true);
791 results_base += 16;
792 }
793 break;
794 }
795 case PIPE_QUERY_OCCLUSION_PREDICATE: {
796 unsigned results_base = 0;
797 while (results_base != query->result_size) {
798 result->b = result->b ||
799 r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
800 results_base += 16;
801 }
802 break;
803 }
804 case PIPE_QUERY_TIME_ELAPSED:
805 result->u64 += r600_query_read_result(buffer, 0, 2, false);
806 break;
807 case PIPE_QUERY_TIMESTAMP:
808 {
809 uint32_t *current_result = (uint32_t*)buffer;
810 result->u64 = (uint64_t)current_result[0] |
811 (uint64_t)current_result[1] << 32;
812 break;
813 }
814 case PIPE_QUERY_PRIMITIVES_EMITTED:
815 /* SAMPLE_STREAMOUTSTATS stores this structure:
816 * {
817 * u64 NumPrimitivesWritten;
818 * u64 PrimitiveStorageNeeded;
819 * }
820 * We only need NumPrimitivesWritten here. */
821 result->u64 += r600_query_read_result(buffer, 2, 6, true);
822 break;
823 case PIPE_QUERY_PRIMITIVES_GENERATED:
824 /* Here we read PrimitiveStorageNeeded. */
825 result->u64 += r600_query_read_result(buffer, 0, 4, true);
826 break;
827 case PIPE_QUERY_SO_STATISTICS:
828 result->so_statistics.num_primitives_written +=
829 r600_query_read_result(buffer, 2, 6, true);
830 result->so_statistics.primitives_storage_needed +=
831 r600_query_read_result(buffer, 0, 4, true);
832 break;
833 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
834 result->b = result->b ||
835 r600_query_read_result(buffer, 2, 6, true) !=
836 r600_query_read_result(buffer, 0, 4, true);
837 break;
838 case PIPE_QUERY_PIPELINE_STATISTICS:
839 if (ctx->chip_class >= EVERGREEN) {
840 result->pipeline_statistics.ps_invocations +=
841 r600_query_read_result(buffer, 0, 22, false);
842 result->pipeline_statistics.c_primitives +=
843 r600_query_read_result(buffer, 2, 24, false);
844 result->pipeline_statistics.c_invocations +=
845 r600_query_read_result(buffer, 4, 26, false);
846 result->pipeline_statistics.vs_invocations +=
847 r600_query_read_result(buffer, 6, 28, false);
848 result->pipeline_statistics.gs_invocations +=
849 r600_query_read_result(buffer, 8, 30, false);
850 result->pipeline_statistics.gs_primitives +=
851 r600_query_read_result(buffer, 10, 32, false);
852 result->pipeline_statistics.ia_primitives +=
853 r600_query_read_result(buffer, 12, 34, false);
854 result->pipeline_statistics.ia_vertices +=
855 r600_query_read_result(buffer, 14, 36, false);
856 result->pipeline_statistics.hs_invocations +=
857 r600_query_read_result(buffer, 16, 38, false);
858 result->pipeline_statistics.ds_invocations +=
859 r600_query_read_result(buffer, 18, 40, false);
860 result->pipeline_statistics.cs_invocations +=
861 r600_query_read_result(buffer, 20, 42, false);
862 } else {
863 result->pipeline_statistics.ps_invocations +=
864 r600_query_read_result(buffer, 0, 16, false);
865 result->pipeline_statistics.c_primitives +=
866 r600_query_read_result(buffer, 2, 18, false);
867 result->pipeline_statistics.c_invocations +=
868 r600_query_read_result(buffer, 4, 20, false);
869 result->pipeline_statistics.vs_invocations +=
870 r600_query_read_result(buffer, 6, 22, false);
871 result->pipeline_statistics.gs_invocations +=
872 r600_query_read_result(buffer, 8, 24, false);
873 result->pipeline_statistics.gs_primitives +=
874 r600_query_read_result(buffer, 10, 26, false);
875 result->pipeline_statistics.ia_primitives +=
876 r600_query_read_result(buffer, 12, 28, false);
877 result->pipeline_statistics.ia_vertices +=
878 r600_query_read_result(buffer, 14, 30, false);
879 }
880 #if 0 /* for testing */
881 printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
882 "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
883 "Clipper prims=%llu, PS=%llu, CS=%llu\n",
884 result->pipeline_statistics.ia_vertices,
885 result->pipeline_statistics.ia_primitives,
886 result->pipeline_statistics.vs_invocations,
887 result->pipeline_statistics.hs_invocations,
888 result->pipeline_statistics.ds_invocations,
889 result->pipeline_statistics.gs_invocations,
890 result->pipeline_statistics.gs_primitives,
891 result->pipeline_statistics.c_invocations,
892 result->pipeline_statistics.c_primitives,
893 result->pipeline_statistics.ps_invocations,
894 result->pipeline_statistics.cs_invocations);
895 #endif
896 break;
897 default:
898 assert(0);
899 }
900 }
901
902 static boolean r600_get_query_result(struct pipe_context *ctx,
903 struct pipe_query *query, boolean wait,
904 union pipe_query_result *result)
905 {
906 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
907 struct r600_query *rquery = (struct r600_query *)query;
908
909 return rquery->ops->get_result(rctx, rquery, wait, result);
910 }
911
912 static void r600_query_hw_clear_result(struct r600_query_hw *query,
913 union pipe_query_result *result)
914 {
915 util_query_clear_result(result, query->b.type);
916 }
917
918 boolean r600_query_hw_get_result(struct r600_common_context *rctx,
919 struct r600_query *rquery,
920 boolean wait, union pipe_query_result *result)
921 {
922 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
923 struct r600_query_buffer *qbuf;
924
925 query->ops->clear_result(query, result);
926
927 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
928 unsigned results_base = 0;
929 void *map;
930
931 map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
932 PIPE_TRANSFER_READ |
933 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
934 if (!map)
935 return FALSE;
936
937 while (results_base != qbuf->results_end) {
938 query->ops->add_result(rctx, query, map + results_base,
939 result);
940 results_base += query->result_size;
941 }
942 }
943
944 /* Convert the time to expected units. */
945 if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
946 rquery->type == PIPE_QUERY_TIMESTAMP) {
947 result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
948 }
949 return TRUE;
950 }
951
952 static void r600_render_condition(struct pipe_context *ctx,
953 struct pipe_query *query,
954 boolean condition,
955 uint mode)
956 {
957 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
958 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
959 struct r600_query_buffer *qbuf;
960 struct r600_atom *atom = &rctx->render_cond_atom;
961
962 rctx->render_cond = query;
963 rctx->render_cond_invert = condition;
964 rctx->render_cond_mode = mode;
965
966 /* Compute the size of SET_PREDICATION packets. */
967 atom->num_dw = 0;
968 if (query) {
969 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
970 atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
971 }
972
973 rctx->set_atom_dirty(rctx, atom, query != NULL);
974 }
975
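/* Emit end-of-query packets for all queries in the list, but keep them in
 * the list so that they can be resumed later. */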
976 static void r600_suspend_queries(struct r600_common_context *ctx,
977 struct list_head *query_list,
978 unsigned *num_cs_dw_queries_suspend)
979 {
980 struct r600_query_hw *query;
981
982 LIST_FOR_EACH_ENTRY(query, query_list, list) {
983 r600_query_hw_emit_stop(ctx, query);
984 }
985 assert(*num_cs_dw_queries_suspend == 0);
986 }
987
988 void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
989 {
990 r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
991 &ctx->num_cs_dw_nontimer_queries_suspend);
992 }
993
994 void r600_suspend_timer_queries(struct r600_common_context *ctx)
995 {
996 r600_suspend_queries(ctx, &ctx->active_timer_queries,
997 &ctx->num_cs_dw_timer_queries_suspend);
998 }
999
1000 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
1001 struct list_head *query_list)
1002 {
1003 struct r600_query_hw *query;
1004 unsigned num_dw = 0;
1005
1006 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1007 /* begin + end */
1008 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1009
1010 /* Workaround for the fact that
1011 * num_cs_dw_nontimer_queries_suspend is incremented for every
1012 * resumed query, which raises the bar in need_cs_space for
1013 * queries about to be resumed.
1014 */
1015 num_dw += query->num_cs_dw_end;
1016 }
1017 /* primitives generated query */
1018 num_dw += ctx->streamout.enable_atom.num_dw;
1019 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
1020 num_dw += 13;
1021
1022 return num_dw;
1023 }
1024
1025 static void r600_resume_queries(struct r600_common_context *ctx,
1026 struct list_head *query_list,
1027 unsigned *num_cs_dw_queries_suspend)
1028 {
1029 struct r600_query_hw *query;
1030 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);
1031
1032 assert(*num_cs_dw_queries_suspend == 0);
1033
1034 /* Check CS space here. Resuming must not be interrupted by flushes. */
1035 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);
1036
1037 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1038 r600_query_hw_emit_start(ctx, query);
1039 }
1040 }
1041
1042 void r600_resume_nontimer_queries(struct r600_common_context *ctx)
1043 {
1044 r600_resume_queries(ctx, &ctx->active_nontimer_queries,
1045 &ctx->num_cs_dw_nontimer_queries_suspend);
1046 }
1047
1048 void r600_resume_timer_queries(struct r600_common_context *ctx)
1049 {
1050 r600_resume_queries(ctx, &ctx->active_timer_queries,
1051 &ctx->num_cs_dw_timer_queries_suspend);
1052 }
1053
1054 /* Determine the mask of enabled render backends. */
1055 void r600_query_init_backend_mask(struct r600_common_context *ctx)
1056 {
1057 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1058 struct r600_resource *buffer;
1059 uint32_t *results;
1060 unsigned num_backends = ctx->screen->info.num_render_backends;
1061 unsigned i, mask = 0;
1062
1063 /* if backend_map query is supported by the kernel */
1064 if (ctx->screen->info.r600_gb_backend_map_valid) {
1065 unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
1066 unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
1067 unsigned item_width, item_mask;
1068
1069 if (ctx->chip_class >= EVERGREEN) {
1070 item_width = 4;
1071 item_mask = 0x7;
1072 } else {
1073 item_width = 2;
1074 item_mask = 0x3;
1075 }
1076
1077 while (num_tile_pipes--) {
1078 i = backend_map & item_mask;
1079 mask |= (1<<i);
1080 backend_map >>= item_width;
1081 }
1082 if (mask != 0) {
1083 ctx->backend_mask = mask;
1084 return;
1085 }
1086 }
1087
1088 /* Otherwise, use the fallback path for older kernels. */
1089
1090 /* create buffer for event data */
1091 buffer = (struct r600_resource*)
1092 pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
1093 PIPE_USAGE_STAGING, ctx->max_db*16);
1094 if (!buffer)
1095 goto err;
1096
1097 /* initialize buffer with zeroes */
1098 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
1099 if (results) {
1100 memset(results, 0, ctx->max_db * 4 * 4);
1101
1102 /* emit EVENT_WRITE for ZPASS_DONE */
1103 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1104 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
1105 radeon_emit(cs, buffer->gpu_address);
1106 radeon_emit(cs, buffer->gpu_address >> 32);
1107
1108 r600_emit_reloc(ctx, &ctx->gfx, buffer,
1109 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
1110
1111 /* analyze results */
1112 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
1113 if (results) {
1114 for(i = 0; i < ctx->max_db; i++) {
1115 /* At least the highest bit will be set if the backend is used. */
1116 if (results[i*4 + 1])
1117 mask |= (1<<i);
1118 }
1119 }
1120 }
1121
1122 pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
1123
1124 if (mask != 0) {
1125 ctx->backend_mask = mask;
1126 return;
1127 }
1128
1129 err:
1130 /* Fall back to the old method: set the num_backends lower bits to 1. */
1131 ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
1132 return;
1133 }
1134
1135 #define XFULL(name_, query_type_, type_, result_type_, group_id_) \
1136 { \
1137 .name = name_, \
1138 .query_type = R600_QUERY_##query_type_, \
1139 .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
1140 .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
1141 .group_id = group_id_ \
1142 }
1143
1144 #define X(name_, query_type_, type_, result_type_) \
1145 XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
1146
1147 #define XG(group_, name_, query_type_, type_, result_type_) \
1148 XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
1149
1150 static struct pipe_driver_query_info r600_driver_query_list[] = {
1151 X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
1152 X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
1153 X("draw-calls", DRAW_CALLS, UINT64, CUMULATIVE),
1154 X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
1155 X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
1156 X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
1157 X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, CUMULATIVE),
1158 X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
1159 X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
1160 X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
1161
1162 /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
1163 * which use them as a fallback path to detect the GPU type.
1164 *
1165 * Note: The names of these queries are significant for GPUPerfStudio
1166 * (and possibly their order as well). */
1167 XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
1168 XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
1169 XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
1170 XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
1171 XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),
1172
1173 /* The following queries must be at the end of the list because their
1174 * availability is adjusted dynamically based on the DRM version. */
1175 X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
1176 X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
1177 X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
1178 X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
1179 };
1180
1181 #undef X
1182 #undef XG
1183 #undef XFULL
1184
1185 static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
1186 {
1187 if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
1188 return Elements(r600_driver_query_list);
1189 else if (rscreen->info.drm_major == 3)
1190 return Elements(r600_driver_query_list) - 3;
1191 else
1192 return Elements(r600_driver_query_list) - 4;
1193 }
1194
1195 static int r600_get_driver_query_info(struct pipe_screen *screen,
1196 unsigned index,
1197 struct pipe_driver_query_info *info)
1198 {
1199 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
1200 unsigned num_queries = r600_get_num_queries(rscreen);
1201
1202 if (!info) {
1203 unsigned num_perfcounters =
1204 r600_get_perfcounter_info(rscreen, 0, NULL);
1205
1206 return num_queries + num_perfcounters;
1207 }
1208
1209 if (index >= num_queries)
1210 return r600_get_perfcounter_info(rscreen, index - num_queries, info);
1211
1212 *info = r600_driver_query_list[index];
1213
1214 switch (info->query_type) {
1215 case R600_QUERY_REQUESTED_VRAM:
1216 case R600_QUERY_VRAM_USAGE:
1217 info->max_value.u64 = rscreen->info.vram_size;
1218 break;
1219 case R600_QUERY_REQUESTED_GTT:
1220 case R600_QUERY_GTT_USAGE:
1221 info->max_value.u64 = rscreen->info.gart_size;
1222 break;
1223 case R600_QUERY_GPU_TEMPERATURE:
1224 info->max_value.u64 = 125;
1225 break;
1226 }
1227
1228 if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
1229 info->group_id += rscreen->perfcounters->num_groups;
1230
1231 return 1;
1232 }
1233
1234 /* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
1235 * performance counter groups, so be careful when changing this and related
1236 * functions.
1237 */
1238 static int r600_get_driver_query_group_info(struct pipe_screen *screen,
1239 unsigned index,
1240 struct pipe_driver_query_group_info *info)
1241 {
1242 struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
1243 unsigned num_pc_groups = 0;
1244
1245 if (rscreen->perfcounters)
1246 num_pc_groups = rscreen->perfcounters->num_groups;
1247
1248 if (!info)
1249 return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
1250
1251 if (index < num_pc_groups)
1252 return r600_get_perfcounter_group_info(rscreen, index, info);
1253
1254 index -= num_pc_groups;
1255 if (index >= R600_NUM_SW_QUERY_GROUPS)
1256 return 0;
1257
1258 info->name = "GPIN";
1259 info->max_active_queries = 5;
1260 info->num_queries = 5;
1261 return 1;
1262 }
1263
1264 static void
1265 r600_set_active_query_state(struct pipe_context *pipe, boolean enable)
1266 {
1267 }
1268
1269 void r600_query_init(struct r600_common_context *rctx)
1270 {
1271 rctx->b.create_query = r600_create_query;
1272 rctx->b.create_batch_query = r600_create_batch_query;
1273 rctx->b.destroy_query = r600_destroy_query;
1274 rctx->b.begin_query = r600_begin_query;
1275 rctx->b.end_query = r600_end_query;
1276 rctx->b.get_query_result = r600_get_query_result;
1277 rctx->b.set_active_query_state = r600_set_active_query_state;
1278 rctx->render_cond_atom.emit = r600_emit_query_predication;
1279
1280 if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
1281 rctx->b.render_condition = r600_render_condition;
1282
1283 LIST_INITHEAD(&rctx->active_nontimer_queries);
1284 LIST_INITHEAD(&rctx->active_timer_queries);
1285 }
1286
1287 void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
1288 {
1289 rscreen->b.get_driver_query_info = r600_get_driver_query_info;
1290 rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
1291 }