radeon: cleanup driver query list
[mesa.git] src/gallium/drivers/radeon/r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_cs.h"
#include "util/u_memory.h"


struct r600_query_buffer {
	/* The buffer where query results are stored. */
	struct r600_resource *buf;
	/* Offset of the next free result after current query data */
	unsigned results_end;
	/* If a query buffer is full, a new buffer is created and the old one
	 * is put in here. When we calculate the result, we sum up the samples
	 * from all buffers. */
	struct r600_query_buffer *previous;
};

struct r600_query {
	/* The query buffer and how many results are in it. */
	struct r600_query_buffer buffer;
	/* The type of query */
	unsigned type;
	/* Size of the result in memory for both begin_query and end_query,
	 * this can be one or two numbers, or it could even be a size of a structure. */
	unsigned result_size;
	/* The number of dwords for begin_query or end_query. */
	unsigned num_cs_dw;
	/* linked list of queries */
	struct list_head list;
	/* for custom non-GPU queries */
	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
	/* For transform feedback: which stream the query is for */
	unsigned stream;
};


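/* Timer queries (TIME_ELAPSED, TIMESTAMP) are kept on a separate active list
 * from all other queries, so that the two groups can be suspended and resumed
 * independently around flushes (see r600_suspend_*_queries and
 * r600_resume_*_queries below). */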
static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP;
}

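/* GPU_FINISHED and TIMESTAMP only record a value at end_query time, so
 * begin_query has nothing to do for them. */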
static bool r600_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}

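/* Allocate a 4096-byte staging buffer for query results. Results are
 * appended at results_end until the buffer is full, at which point a new
 * buffer is chained in front of it (see r600_query_buffer::previous).
 * Returns NULL for query types that don't need a GPU buffer. */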
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;

	/* Non-GPU queries. */
	switch (type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		return NULL;
	}

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

		/* Set top bits for unused backends. */
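		/* Each ZPASS_DONE slot holds a (begin, end) pair of 64-bit
		 * counters per DB, with bit 63 serving as a "result written"
		 * flag (see r600_query_read_result). Disabled backends never
		 * write their slot, so pre-set their flags here so those
		 * slots read back as completed zero samples. */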
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
	case PIPE_QUERY_TIMESTAMP:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_PIPELINE_STATISTICS:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		break;
	default:
		assert(0);
	}
	return buf;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (enable != old_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t va;

	r600_update_occlusion_query_state(ctx, query->type, 1);
	r600_update_prims_generated_query_state(ctx, query->type, 1);
	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

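	/* Active queries are suspended (ended) and later resumed (begun
	 * again) around CS flushes; track how many dwords ending them will
	 * take so need_gfx_cs_space can reserve room for it. */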
	if (r600_is_timer_query(query->type))
		ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
	else
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
}

static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (!r600_query_needs_begin(query->type)) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
	}

	va = query->buffer.buf->gpu_address;

	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type)) {
		if (r600_is_timer_query(query->type))
			ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
		else
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
	}

	r600_update_occlusion_query_state(ctx, query->type, -1);
	r600_update_prims_generated_query_state(ctx, query->type, -1);
}

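/* Emit SET_PREDICATION packets covering every result slot in every chained
 * query buffer; the PREDICATION_CONTINUE bit on all packets after the first
 * tells the hardware to combine the checks across slots. */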
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query *query = (struct r600_query*)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *query;
	bool skip_allocation = false;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw = 6;
		break;
	/* Non-GPU queries and queries not requiring a buffer. */
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		skip_allocation = true;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!skip_allocation) {
		query->buffer.buf = r600_new_query_buffer(rctx, query_type);
		if (!query->buffer.buf) {
			FREE(query);
			return NULL;
		}
	}
	return (struct pipe_query*)query;
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(query);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return false;
	}

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return true;
	case R600_QUERY_DRAW_CALLS:
		rquery->begin_result = rctx->num_draw_calls;
		return true;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->begin_result = 0;
		return true;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return true;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return true;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return true;
	case R600_QUERY_GPU_LOAD:
		rquery->begin_result = r600_gpu_load_begin(rctx->screen);
		return true;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		return true;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return true;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rquery->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_emit_query_begin(rctx, rquery);

	if (r600_is_timer_query(rquery->type))
		LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
	else
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
	return true;
}

static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return;
	case PIPE_QUERY_GPU_FINISHED:
		ctx->flush(ctx, &rquery->fence, 0);
		return;
	case R600_QUERY_DRAW_CALLS:
		rquery->end_result = rctx->num_draw_calls;
		return;
	case R600_QUERY_REQUESTED_VRAM:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
		return;
	case R600_QUERY_REQUESTED_GTT:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
		return;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return;
	case R600_QUERY_VRAM_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_VRAM_USAGE);
		return;
	case R600_QUERY_GTT_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GTT_USAGE);
		return;
	case R600_QUERY_GPU_TEMPERATURE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GPU_TEMPERATURE) / 1000;
		return;
	case R600_QUERY_CURRENT_GPU_SCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_SCLK) * 1000000;
		return;
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_MCLK) * 1000000;
		return;
	case R600_QUERY_GPU_LOAD:
		rquery->end_result = r600_gpu_load_end(rctx->screen, rquery->begin_result);
		return;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->end_result = p_atomic_read(&rctx->screen->num_compilations);
		return;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return;
	}

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type))
		LIST_DELINIT(&rquery->list);
}

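/* Read one (begin, end) pair of 64-bit counters from a mapped result buffer
 * and return end - start. If test_status_bit is set, bit 63 of each value is
 * a "result written" flag and the pair only counts once both halves have
 * landed in memory. */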
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union pipe_query_result *result)
{
	struct pipe_screen *screen = ctx->b.screen;
	unsigned results_base = 0;
	char *map;

	/* Non-GPU queries. */
	switch (query->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)ctx->screen->info.r600_clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED:
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		result->u64 = query->end_result - query->begin_result;
		return TRUE;
	case R600_QUERY_GPU_LOAD:
		result->u64 = query->end_result;
		return TRUE;
	}

	map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
					      PIPE_TRANSFER_READ |
					      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)map;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so_statistics.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so_statistics.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 22, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 24, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 26, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 28, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 30, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 32, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 34, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 36, false);
				result->pipeline_statistics.hs_invocations +=
					r600_query_read_result(map + results_base, 16, 38, false);
				result->pipeline_statistics.ds_invocations +=
					r600_query_read_result(map + results_base, 18, 40, false);
				result->pipeline_statistics.cs_invocations +=
					r600_query_read_result(map + results_base, 20, 42, false);
				results_base += query->result_size;
			}
		} else {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 16, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 18, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 20, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 22, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 24, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 26, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 28, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 30, false);
				results_base += query->result_size;
			}
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}

	return TRUE;
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query,
				     boolean wait, union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *qbuf;

	util_query_clear_result(result, rquery->type);

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
			return FALSE;
		}
	}

	/* Convert the time to expected units. */
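	/* The GPU writes clock-crystal ticks; the crystal frequency is
	 * reported in kHz, so ticks * 1000000 / freq yields nanoseconds,
	 * the unit pipe queries report time in. */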
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
	}
	return TRUE;
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
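	/* 5 dwords per result slot: presumably 3 for the SET_PREDICATION
	 * packet itself plus 2 for the relocation emitted by r600_emit_reloc
	 * in r600_emit_query_predication. */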
	atom->num_dw = 0;
	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
		atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

static void r600_suspend_queries(struct r600_common_context *ctx,
				 struct list_head *query_list,
				 unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(*num_cs_dw_queries_suspend == 0);
}

void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
			     &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_suspend_timer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_timer_queries,
			     &ctx->num_cs_dw_timer_queries_suspend);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw * 2;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

static void r600_resume_queries(struct r600_common_context *ctx,
				struct list_head *query_list,
				unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);

	assert(*num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_begin(ctx, query);
	}
}

void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_nontimer_queries,
			    &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_resume_timer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_timer_queries,
			    &ctx->num_cs_dw_timer_queries_suspend);
}

/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

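		/* backend_map packs one backend index per tile pipe,
		 * item_width bits each; set a mask bit for every backend
		 * that appears in the map. */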
		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

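/* Shorthand for a pipe_driver_query_info entry; ~0 is used here as the
 * "no group" group_id. */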
#define X(name_, query_type_, type_, result_type_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = ~(unsigned)0 \
	}

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, CUMULATIVE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, CUMULATIVE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
};

#undef X

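/* Trim queries the kernel can't service: the last three entries (GPU
 * temperature and clocks) require radeon DRM 2.42+ and are not exposed on
 * amdgpu (drm_major == 3); older radeon kernels also lack GPU-load. */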
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return Elements(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return Elements(r600_driver_query_list) - 3;
	else
		return Elements(r600_driver_query_list) - 4;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info)
		return num_queries;

	if (index >= num_queries)
		return 0;

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_nontimer_queries);
	LIST_INITHEAD(&rctx->active_timer_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
}