gallium/radeon: don't flush CB/DB caches for timestamp queries
[mesa.git] src/gallium/drivers/radeon/r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"

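/* This file implements two kinds of queries:
 *
 * - Software queries (r600_query_sw) are computed on the CPU from driver
 *   and winsys counters; they need no GPU buffer and no suspend/resume.
 * - Hardware queries (r600_query_hw) make the GPU write begin/end result
 *   pairs into a buffer via EVENT_WRITE packets; the CPU later maps the
 *   buffer and accumulates end - begin.
 */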
/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	screen->fence_reference(screen, &query->fence, NULL);
	FREE(query);
}

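/* Translate driver-specific query types to the winsys counters exposed
 * through radeon_winsys::query_value. */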
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	default: unreachable("query type does not correspond to winsys id");
	}
}

static boolean r600_query_sw_begin(struct r600_common_context *rctx,
				   struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->begin_result = r600_gpu_load_begin(rctx->screen);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return TRUE;
}

static void r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, 0);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->end_result = r600_gpu_load_end(rctx->screen,
						      query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}
}

static boolean r600_query_sw_get_result(struct r600_common_context *rctx,
					struct r600_query *rquery,
					boolean wait,
					union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return TRUE;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return TRUE;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return TRUE;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return TRUE;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return TRUE;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return TRUE;
}

static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result
};

static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
					       unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_context *rctx,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
	FREE(rquery);
}

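/* Allocate a new result buffer. The size is rounded up to at least 4096
 * bytes so that many results can be appended to the same buffer before a
 * new one has to be chained on. */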
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size, 4096);

	/* Queries are normally read by the CPU after being written by the
	 * GPU, hence staging is probably a good usage pattern. */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE)
		query->ops->prepare_buffer(ctx, query, buf);

	return buf;
}

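/* Clear a result buffer before use. For occlusion queries, bit 63 of each
 * result slot belonging to a disabled render backend is pre-set, because
 * ZPASS_DONE only writes results for enabled backends and the status-bit
 * check in r600_query_read_result would otherwise never pass. */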
static void r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
						PIPE_TRANSFER_WRITE |
						PIPE_TRANSFER_UNSYNCHRONIZED);

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
	}
}

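/* There are two vtable levels: r600_query_ops backs the pipe_context query
 * entry points, while r600_query_hw_ops lets other hardware query
 * implementations (the perfcounter code, for example) override buffer
 * preparation, start/stop packet emission, and result accumulation. */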
static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

boolean r600_query_hw_init(struct r600_common_context *rctx,
			   struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rctx, query);
	if (!query->buffer.buf)
		return FALSE;

	return TRUE;
}

static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw_end = 8;
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rctx, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

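/* Select the SAMPLE_STREAMOUTSTATS event for the query's streamout stream;
 * unknown stream indices fall back to stream 0. */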
static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

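/* Emit the begin-of-query packets. Called from begin_query and again from
 * r600_resume_queries; when the current buffer is full, it is moved onto
 * the "previous" chain and a fresh buffer takes its place, so one query can
 * accumulate results across many buffers. */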
static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	/* Queries that emit a begin have already reserved CS space for the
	 * end packet in r600_query_hw_emit_start. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, FALSE);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

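/* Emit SET_PREDICATION packets for render condition. One packet is emitted
 * per result pair in each buffer of the query; the CONTINUE bit is set on
 * every packet after the first, presumably so the predication test chains
 * across all result pairs. */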
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(ctx, query_type);

	return r600_query_hw_create(rctx, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

static void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
					struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
		/* Obtain a new buffer if the current one can't be mapped without a stall. */
		if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
			pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
			query->buffer.buf = r600_new_query_buffer(rctx, query);
		} else {
			query->ops->prepare_buffer(rctx, query, query->buffer.buf);
		}
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;
}

boolean r600_query_hw_begin(struct r600_common_context *rctx,
			    struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}

static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->end(rctx, rquery);
}

void r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);
}

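/* Read one 64-bit counter from a begin/end pair of dword indices and return
 * the difference. If test_status_bit is set, bit 63 of both values must be
 * set (i.e. the GPU has really written them, or prepare_buffer pre-set them
 * for a disabled backend); otherwise 0 is returned. */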
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		unsigned results_base = 0;
		while (results_base != query->result_size) {
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		unsigned results_base = 0;
		while (results_base != query->result_size) {
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)buffer;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
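		/* The numbers below are dword offsets into the begin/end
		 * halves of the result pair: 11 counters on Evergreen (begin
		 * half at dwords 0..21, end half at 22..43) and 8 on R600
		 * (begin at 0..15, end at 16..31), matching result_size set
		 * in r600_query_hw_create. */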
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

boolean r600_query_hw_get_result(struct r600_common_context *rctx,
				 struct r600_query *rquery,
				 boolean wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return FALSE;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
	}
	return TRUE;
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

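/* Get the backends (DB) mask: determine which render backends are enabled,
 * either from the kernel's backend_map info or, failing that, by writing a
 * ZPASS_DONE event and checking which result slots were touched. The mask
 * tells r600_query_hw_prepare_buffer which occlusion result slots will
 * never be written by the GPU. */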
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.num_render_backends;
	unsigned i, mask = 0;

	/* if the backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise, take the backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* last resort: set the lowest num_backends bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

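/* Helpers for filling in pipe_driver_query_info entries: X declares an
 * ungrouped query, XG places the query in the given R600_QUERY_GROUP, and
 * XFULL spells out all fields. */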
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, CUMULATIVE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, CUMULATIVE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

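/* The last entries of r600_driver_query_list depend on kernel support:
 * radeon kernels (DRM 2.x) expose all of them from 2.42 on, amdgpu (DRM 3)
 * lacks the last three (temperature and clocks), and older radeon kernels
 * also lack GPU-load. */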
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return Elements(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return Elements(r600_driver_query_list) - 3;
	else
		return Elements(r600_driver_query_list) - 4;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}