gallium/radeon: cleanup getting PIPE_QUERY_TIMESTAMP result
src/gallium/drivers/radeon/r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"

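/* Byte layout of one result sample in a query buffer: offsets of the
 * begin/end values and of the fence that signals completion.  For
 * queries with per-DB results (occlusion), pair_count/pair_stride
 * describe the array of begin/end pairs.  r600_get_hw_query_params()
 * below fills this in for each query type.
 */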
struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_context *rctx,
                                  struct r600_query *rquery)
{
	struct pipe_screen *screen = rctx->b.screen;
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	screen->fence_reference(screen, &query->fence, NULL);
	FREE(query);
}

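/* Map a driver-specific query type onto the winsys counter that backs
 * it; only the types listed here are winsys-backed.
 */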
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_CTX_FLUSHES: return RADEON_NUM_CS_FLUSHES;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	default: unreachable("query type does not correspond to winsys id");
	}
}

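/* Beginning a software query just snapshots the current counter value;
 * the reported result is end_result - begin_result (see
 * r600_query_sw_get_result).
 */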
static bool r600_query_sw_begin(struct r600_common_context *rctx,
                                struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CTX_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->begin_result = r600_gpu_load_begin(rctx->screen);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
                              struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CTX_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GPU_LOAD:
		query->end_result = r600_gpu_load_end(rctx->screen,
		                                      query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = rctx->last_tex_ps_draw_ratio;
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}

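/* Resolve a software query.  Constant queries (frequency, GPIN_*) are
 * answered directly; counter-style queries fall through to the
 * end - begin computation, with unit conversions for a few types.
 */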
static bool r600_query_sw_get_result(struct r600_common_context *rctx,
                                     struct r600_query *rquery,
                                     bool wait,
                                     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		result->b = screen->fence_finish(screen, &rctx->b, query->fence,
		                                 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}

static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result
};

static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
                                               unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_context *rctx,
                           struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}

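/* Allocate a query result buffer.  It is sized to at least one GART
 * page so that many result samples can be accumulated before a new
 * buffer has to be chained on.  Predicate queries additionally need
 * the buffer pre-initialized, see r600_query_hw_prepare_buffer.
 */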
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
                                                   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
	                         ctx->screen->info.gart_page_size);

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
		                   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
		if (!query->ops->prepare_buffer(ctx, query, buf)) {
			r600_resource_reference(&buf, NULL);
			return NULL;
		}
	}

	return buf;
}

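/* Zero a fresh query buffer.  For occlusion queries, also set the top
 * bit of the result slots belonging to disabled render backends, so
 * that r600_query_read_result treats them as valid instead of waiting
 * for values that will never be written.
 */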
static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
                                         struct r600_query_hw *query,
                                         struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
	                                        PIPE_TRANSFER_WRITE |
	                                        PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
	}

	return true;
}

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
                                        struct r600_query_hw *query,
                                        struct r600_resource *buffer,
                                        uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
                                       struct r600_query_hw *query,
                                       struct r600_resource *buffer,
                                       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_context *ctx,
                                     struct r600_query_hw *, void *buffer,
                                     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
                                       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_context *rctx,
                        struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rctx, query);
	if (!query->buffer.buf)
		return false;

	return true;
}

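/* Create a hardware query.  result_size is the footprint of one
 * begin/end sample in the query buffer; e.g. an occlusion sample is a
 * pair of 64-bit ZPASS counts per DB, followed by a fence dword plus
 * padding for alignment.
 */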
static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
                                               unsigned query_type,
                                               unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rctx, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
                                              unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query_hw *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

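/* Emit the "begin" half of a sample: an EVENT_WRITE (or
 * EVENT_WRITE_EOP for timing queries) that makes the GPU write the
 * start values at va.
 */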
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
                                        struct r600_query_hw *query,
                                        struct r600_resource *buffer,
                                        uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
	                RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
                                     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
	                       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
                                       struct r600_query_hw *query,
                                       struct r600_resource *buffer,
                                       uint64_t va)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		va += ctx->max_db * 16 - 8;
		r600_gfx_write_fence(ctx, va, 0, 0x80000000);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);

		va += 8;
		r600_gfx_write_fence(ctx, va, 0, 0x80000000);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);

		va += sample_size;
		r600_gfx_write_fence(ctx, va, 0, 0x80000000);
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
	                RADEON_PRIO_QUERY);
}

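/* Emit the "end" half of a sample and advance results_end.  This is
 * used both by end_query and when suspending active queries around a
 * flush (see r600_suspend_queries).
 */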
static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
                                    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

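/* Emit SET_PREDICATION packets covering every sample written so far,
 * across all chained query buffers, so that conditional rendering
 * takes all accumulated results into account.
 */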
static void r600_emit_query_predication(struct r600_common_context *ctx,
                                        struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
			                RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(ctx, query_type);

	return r600_query_hw_create(rctx, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
                                struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

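/* Discard all results written so far and start over with an empty
 * buffer, replacing the buffer if reusing it would stall.
 */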
void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
                                 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
		/* Obtain a new buffer if the current one can't be mapped without a stall. */
		if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
			r600_resource_reference(&query->buffer.buf, NULL);
			query->buffer.buf = r600_new_query_buffer(rctx, query);
		} else {
			if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
				r600_resource_reference(&query->buffer.buf, NULL);
		}
	}
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
                         struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
                       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}

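/* Fill *params with the result-buffer layout of the given query type;
 * see struct r600_hw_query_params at the top of this file.
 */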
static void r600_get_hw_query_params(struct r600_common_context *rctx,
                                     struct r600_query_hw *rquery, int index,
                                     struct r600_hw_query_params *params)
{
	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = rctx->max_db * 16;
		params->pair_stride = 16;
		params->pair_count = rctx->max_db;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}

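/* Read one begin/end pair of 64-bit values (stored as lo/hi dwords)
 * from a mapped result buffer and return end - start.  If
 * test_status_bit is set, the top bit of both values must be set
 * (it is written once a result has landed); otherwise the sample is
 * considered not ready and 0 is returned.
 */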
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
                                       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static void r600_query_hw_add_result(struct r600_common_context *ctx,
                                     struct r600_query_hw *query,
                                     void *buffer,
                                     union pipe_query_result *result)
{
	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < ctx->max_db; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE: {
		for (unsigned i = 0; i < ctx->max_db; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}

static boolean r600_get_query_result(struct pipe_context *ctx,
                                     struct pipe_query *query, boolean wait,
                                     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
                                       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
                              struct r600_query *rquery,
                              bool wait, union pipe_query_result *result)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		void *map;

		map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
		                                      PIPE_TRANSFER_READ |
		                                      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rctx, query, map + results_base,
			                       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
	}
	return true;
}

static void r600_render_condition(struct pipe_context *ctx,
                                  struct pipe_query *query,
                                  boolean condition,
                                  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
	}

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}

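/* Upper-bound estimate of the CS space needed to resume all queries in
 * query_list, including the state updates that resuming may trigger.
 */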
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
                                                    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.num_render_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_gb_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
		                   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
		                RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-ctx-flushes", NUM_CTX_FLUSHES, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use it as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X
#undef XG
#undef XFULL

static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return ARRAY_SIZE(r600_driver_query_list) - 3;
	else
		return ARRAY_SIZE(r600_driver_query_list) - 4;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
                                      unsigned index,
                                      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
                                            unsigned index,
                                            struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}