/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#include "tgsi/tgsi_text.h"

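/* Byte offsets into a single result slot of a hardware query buffer, as
 * consumed by r600_query_hw_get_result_resource() and its result-collection
 * compute shader below: start_offset/end_offset locate the begin/end
 * snapshots, fence_offset the readiness dword, and pair_stride/pair_count
 * describe the per-DB (begin, end) pairs of occlusion queries. */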
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_context *rctx,
				  struct r600_query *rquery)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	screen->fence_reference(screen, &query->fence, NULL);
	FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_CTX_FLUSHES: return RADEON_NUM_CS_FLUSHES;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	default: unreachable("query type does not correspond to winsys id");
	}
}

static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CTX_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->begin_result = r600_gpu_load_begin(rctx->screen);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CTX_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->end_result = r600_gpu_load_end(rctx->screen,
						      query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = rctx->last_tex_ps_draw_ratio;
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, &rctx->b, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

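	/* Counter-style queries report the delta between end and begin.
	 * Normalize the units afterwards: the winsys reports buffer-wait-time
	 * in nanoseconds (exposed as microseconds) and the temperature in
	 * millidegrees Celsius (exposed as degrees), while the clocks come
	 * in MHz and are exposed in Hz. */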
	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}

static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
					       unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_context *rctx,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 ctx->screen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, so staging is a good usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(ctx, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}

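/* Clear a freshly allocated query buffer. For occlusion queries, each
 * enabled DB writes a (begin, end) pair of 64-bit counters into its 16-byte
 * slot and sets bit 63 of each value once the write has landed. Pre-setting
 * that bit for backends that are disabled in backend_mask makes their
 * always-zero slots count as valid, so result collection never waits on
 * them. */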
static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
						PIPE_TRANSFER_WRITE |
						PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
	}

	return true;
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

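/* Default buffer and packet callbacks for hardware queries. Other query
 * kinds built on r600_query_hw (such as the performance counters in
 * r600_perfcounter.c) can plug in their own r600_query_hw_ops. */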
static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_context *rctx,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rctx, query);
	if (!query->buffer.buf)
		return false;

	return true;
}

static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rctx, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

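/* Track how many occlusion queries are active so the driver can toggle DB
 * counting on and off. PIPE_QUERY_OCCLUSION_COUNTER needs exact zpass
 * counts, so it additionally bumps num_perfect_occlusion_queries, which
 * backends can consult to enable precise ("perfect") counting; the boolean
 * predicate tolerates a cheaper approximate mode. The state callback is only
 * re-emitted when either condition actually changes. */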
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

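/* Emit the packets that take the "begin" snapshot at va: ZPASS_DONE samples
 * all DB counters for occlusion queries, SAMPLE_STREAMOUTSTATS* captures the
 * streamout counters of the selected stream, a bottom-of-pipe
 * EVENT_WRITE_EOP stores a timestamp for TIME_ELAPSED, and
 * SAMPLE_PIPELINESTAT dumps the pipeline statistic counters. */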
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, EOP_DATA_SEL(3) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

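	/* Account for this query's stop packets, so that suspending all
	 * active queries at a flush is guaranteed to fit in the CS. */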
	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		fence_va = va + ctx->max_db * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, EOP_DATA_SEL(3) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);

		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

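	/* Mark the slot's results as complete: once all prior work has
	 * drained, a bottom-of-pipe EOP event stores 0x80000000 at fence_va.
	 * This is the readiness bit tested by the CPU and by the
	 * result-collection shader. */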
	if (fence_va)
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
					 query->buffer.buf, fence_va, 0, 0x80000000);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* Queries with a begin call already reserved CS space for the stop
	 * packets in emit_start; only NO_START queries reserve it here. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

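/* Emit one SET_PREDICATION packet per result slot of the render-condition
 * query. The CP combines the visibility/overflow tests of all packets that
 * have the CONTINUE bit set, which is why every packet after the first
 * sets it. */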
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* If true then invert, see GL_ARB_conditional_render_inverted. */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(ctx, query_type);

	return r600_query_hw_create(rctx, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx, query);
	} else {
		if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = rctx->max_db * 16;
		params->pair_stride = 16;
		params->pair_count = rctx->max_db;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}

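/* Read one 64-bit counter from a mapped result slot. start_index/end_index
 * are dword indices of the begin/end snapshots; when test_status_bit is set,
 * bit 63 of both values must be set (see r600_query_hw_prepare_buffer) or
 * the sample is treated as not ready and contributes zero. */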
static uint64_t r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_context *ctx,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < ctx->max_db; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		for (unsigned i = 0; i < ctx->max_db; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   boolean wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
						      PIPE_TRANSFER_READ |
						      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
	}
	return true;
}

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0..1]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */

		"AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
					"UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
					"UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
					"LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

					"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
					"LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"

					"U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
					"U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"

					/* Increment pair index */
					"UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
					"USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
					"UIF TEMP[5]\n"
						"BRK\n"
					"ENDIF\n"
				"ENDLOOP\n"

				/* Increment result index */
				"UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
			"ENDLOOP\n"
		"ENDIF\n"

		"AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
		"UIF TEMP[4]\n"
			/* Store accumulated data for chaining. */
			"STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
		"ELSE\n"
			"AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
			"UIF TEMP[4]\n"
				/* Store result availability. */
				"NOT TEMP[0].z, TEMP[0]\n"
				"AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
				"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

				"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
				"UIF TEMP[4]\n"
					"STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
				"ENDIF\n"
			"ELSE\n"
				/* Store result if it is available. */
				"NOT TEMP[4], TEMP[0].zzzz\n"
				"UIF TEMP[4]\n"
					/* Apply timestamp conversion */
					"AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
					"UIF TEMP[4]\n"
						"U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
						"U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
					"ENDIF\n"

					/* Convert to boolean */
					"AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
					"UIF TEMP[4]\n"
						"U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
						"AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
						"MOV TEMP[0].y, IMM[0].xxxx\n"
					"ENDIF\n"

					"AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
					"UIF TEMP[4]\n"
						"STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
					"ELSE\n"
						/* Clamping */
						"UIF TEMP[0].yyyy\n"
							"MOV TEMP[0].x, IMM[0].wwww\n"
						"ENDIF\n"

						"AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
						"UIF TEMP[4]\n"
							"UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
						"ENDIF\n"

						"STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
					"ENDIF\n"
				"ENDIF\n"
			"ENDIF\n"
		"ENDIF\n"

		"END\n";

	char text[sizeof(text_tmpl) + 32];
	struct tgsi_token tokens[1024];
	struct pipe_compute_state state = {};

	/* Hard code the frequency into the shader so that the backend can
	 * use the full range of optimizations for divide-by-constant.
	 */
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}

static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;
	struct r600_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct r600_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct r600_hw_query_params params;
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
	} consts;

	if (!rctx->query_result_shader) {
		r600_create_query_result_shader(rctx);
		if (!rctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}

	rctx->save_qbo_state(&rctx->b, &saved_state);

	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset;
		ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset;
			ssbo[2].buffer_size = 8;

			((struct r600_resource *)resource)->TC_L2_dirty = true;
		}

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

/* Compute the mask of enabled render backends (DBs). */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.num_render_backends;
	unsigned i, mask = 0;

	/* If the kernel supports the backend_map query, use it. */
	if (ctx->screen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* Otherwise, probe the backends directly on older kernels. */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback: set the num_backends lowest bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-ctx-flushes", NUM_CTX_FLUSHES, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

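/* Trim the tail of r600_driver_query_list based on kernel support: amdgpu
 * (DRM major 3) lacks the temperature and clock queries, and radeon kernels
 * older than 2.42 additionally lack GPU-load. */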
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return ARRAY_SIZE(r600_driver_query_list) - 3;
	else
		return ARRAY_SIZE(r600_driver_query_list) - 4;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

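/* Hook the query entry points into pipe_context. A state tracker drives
 * them through the usual Gallium sequence, e.g. (sketch):
 *
 *   struct pipe_query *q =
 *      pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER, 0);
 *   pipe->begin_query(pipe, q);
 *   ... draw calls ...
 *   pipe->end_query(pipe, q);
 *   union pipe_query_result res;
 *   pipe->get_query_result(pipe, q, true, &res); // wait=true blocks
 *   pipe->destroy_query(pipe, q);
 */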
void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}