radeon: move R600_QUERY_* constants into a new query header file
src/gallium/drivers/radeon/r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"

struct r600_query_buffer {
	/* The buffer where query results are stored. */
	struct r600_resource *buf;
	/* Offset of the next free result after current query data */
	unsigned results_end;
	/* If a query buffer is full, a new buffer is created and the old one
	 * is put in here. When we calculate the result, we sum up the samples
	 * from all buffers. */
	struct r600_query_buffer *previous;
};

struct r600_query {
	/* The query buffer and how many results are in it. */
	struct r600_query_buffer buffer;
	/* The type of query */
	unsigned type;
	/* Size of the result in memory for both begin_query and end_query;
	 * this can be one or two numbers, or even the size of a structure. */
	unsigned result_size;
	/* The number of dwords for begin_query or end_query. */
	unsigned num_cs_dw;
	/* Linked list of queries */
	struct list_head list;
	/* For custom non-GPU queries */
	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
	/* For transform feedback: which stream the query is for */
	unsigned stream;
};

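/* Classify query types: "timer" queries sample GPU timestamps and are kept
 * on a separate active list; TIMESTAMP and GPU_FINISHED have no begin
 * operation and only emit work in end_query. */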
static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP;
}

static bool r600_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}

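/* Allocate a 4096-byte staging buffer for GPU-written results, or return
 * NULL for queries that don't need one. Each occlusion slot holds a
 * (begin, end) pair of 64-bit counters per DB, with the top bit of each
 * counter flagging it as valid; pre-setting the top bits for disabled
 * backends keeps result collection from waiting on counters that will
 * never be written. */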
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;

	/* Non-GPU queries. */
	switch (type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		return NULL;
	}

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
	case PIPE_QUERY_TIMESTAMP:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_PIPELINE_STATISTICS:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		break;
	default:
		assert(0);
	}
	return buf;
}

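/* Notify the driver when the number of active occlusion queries
 * transitions between zero and non-zero, so the set_occlusion_query_state
 * hook can enable or disable sample counting. */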
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (enable != old_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

static unsigned event_type_for_stream(struct r600_query *query)
{
	switch (query->stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

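/* Emit the packets that start a query: reserve CS space for both the begin
 * and end packets, chain a fresh result buffer if the current one is full,
 * then write the event for the query type at the current result offset. */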
static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t va;

	r600_update_occlusion_query_state(ctx, query->type, 1);
	r600_update_prims_generated_query_state(ctx, query->type, 1);
	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (r600_is_timer_query(query->type))
		ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
	else
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
}

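/* Emit the packets that end a query. For most types the end value lands at
 * results_end + result_size/2, i.e. in the second half of the slot;
 * occlusion slots instead interleave per-DB (begin, end) pairs, so the
 * end address is only 8 bytes past the begin. */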
static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (!r600_query_needs_begin(query->type)) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
	}

	va = query->buffer.buf->gpu_address;

	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) & 0xFFFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type)) {
		if (r600_is_timer_query(query->type))
			ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
		else
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
	}

	r600_update_occlusion_query_state(ctx, query->type, -1);
	r600_update_prims_generated_query_state(ctx, query->type, -1);
}

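/* Emit SET_PREDICATION packets covering every result slot of the render
 * condition query. Each packet after the first sets the CONTINUE bit, so
 * the hardware combines the predicate across all data blocks. */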
static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_query *query = (struct r600_query*)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait;

	if (!query)
		return;

	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (ctx->render_cond_invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
			radeon_emit(cs, va + results_base);
			radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
			r600_emit_reloc(ctx, &ctx->gfx, qbuf->buf, RADEON_USAGE_READ,
					RADEON_PRIO_QUERY);
			results_base += query->result_size;

			/* set CONTINUE bit for all packets except the first */
			op |= PREDICATION_CONTINUE;
		}
	}
}

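/* Create a query object: pick the per-slot result size and the number of
 * CS dwords that begin/end will emit; software queries skip result-buffer
 * allocation and are answered from CPU-side counters instead. */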
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *query;
	bool skip_allocation = false;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw = 6;
		break;
	/* Non-GPU queries and queries not requiring a buffer. */
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		skip_allocation = true;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!skip_allocation) {
		query->buffer.buf = r600_new_query_buffer(rctx, query_type);
		if (!query->buffer.buf) {
			FREE(query);
			return NULL;
		}
	}
	return (struct pipe_query*)query;
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(query);
}

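/* For software queries, begin_query just snapshots the current counter
 * into begin_result. For GPU queries, stale result buffers are discarded,
 * the current buffer is replaced if mapping it would stall, and the begin
 * packets are emitted. */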
static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return false;
	}

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return true;
	case R600_QUERY_DRAW_CALLS:
		rquery->begin_result = rctx->num_draw_calls;
		return true;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->begin_result = 0;
		return true;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return true;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return true;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return true;
	case R600_QUERY_GPU_LOAD:
		rquery->begin_result = r600_gpu_load_begin(rctx->screen);
		return true;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		return true;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return true;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rquery->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_emit_query_begin(rctx, rquery);

	if (r600_is_timer_query(rquery->type))
		LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
	else
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
	return true;
}

static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return;
	case PIPE_QUERY_GPU_FINISHED:
		ctx->flush(ctx, &rquery->fence, 0);
		return;
	case R600_QUERY_DRAW_CALLS:
		rquery->end_result = rctx->num_draw_calls;
		return;
	case R600_QUERY_REQUESTED_VRAM:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
		return;
	case R600_QUERY_REQUESTED_GTT:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
		return;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
		return;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return;
	case R600_QUERY_VRAM_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_VRAM_USAGE);
		return;
	case R600_QUERY_GTT_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GTT_USAGE);
		return;
	case R600_QUERY_GPU_TEMPERATURE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GPU_TEMPERATURE) / 1000;
		return;
	case R600_QUERY_CURRENT_GPU_SCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_SCLK) * 1000000;
		return;
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_MCLK) * 1000000;
		return;
	case R600_QUERY_GPU_LOAD:
		rquery->end_result = r600_gpu_load_end(rctx->screen, rquery->begin_result);
		return;
	case R600_QUERY_NUM_COMPILATIONS:
		rquery->end_result = p_atomic_read(&rctx->screen->num_compilations);
		return;
	case R600_QUERY_NUM_SHADERS_CREATED:
		rquery->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		return;
	}

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type))
		LIST_DELINIT(&rquery->list);
}

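/* Read one (start, end) pair of 64-bit counters from a mapped result slot
 * and return end - start. With test_status_bit, the difference is counted
 * only if the top bit of both values is set, i.e. the GPU has actually
 * written both halves. */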
static uint64_t r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

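/* Accumulate one buffer's worth of results into *result. Software queries
 * are answered from begin_result/end_result; GPU queries map the buffer
 * (without blocking unless wait is set) and walk every result slot,
 * summing the counter pairs selected per query type. */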
static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union pipe_query_result *result)
{
	struct pipe_screen *screen = ctx->b.screen;
	unsigned results_base = 0;
	char *map;

	/* Non-GPU queries. */
	switch (query->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)ctx->screen->info.r600_clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED:
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_NUM_COMPILATIONS:
	case R600_QUERY_NUM_SHADERS_CREATED:
		result->u64 = query->end_result - query->begin_result;
		return TRUE;
	case R600_QUERY_GPU_LOAD:
		result->u64 = query->end_result;
		return TRUE;
	}

	map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
					      PIPE_TRANSFER_READ |
					      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)map;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so_statistics.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so_statistics.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 22, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 24, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 26, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 28, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 30, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 32, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 34, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 36, false);
				result->pipeline_statistics.hs_invocations +=
					r600_query_read_result(map + results_base, 16, 38, false);
				result->pipeline_statistics.ds_invocations +=
					r600_query_read_result(map + results_base, 18, 40, false);
				result->pipeline_statistics.cs_invocations +=
					r600_query_read_result(map + results_base, 20, 42, false);
				results_base += query->result_size;
			}
		} else {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 16, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 18, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 20, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 22, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 24, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 26, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 28, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 30, false);
				results_base += query->result_size;
			}
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}

	return TRUE;
}

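/* Sum results over the whole chain of query buffers, then convert
 * timestamps from GPU ticks to nanoseconds: r600_clock_crystal_freq is in
 * kHz (ticks per millisecond), so ticks * 1000000 / freq gives ns. For
 * example, 27000 ticks at 27000 kHz is 1000000 ns = 1 ms. */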
static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query,
				     boolean wait, union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *qbuf;

	util_query_clear_result(result, rquery->type);

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
			return FALSE;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
	}
	return TRUE;
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
		atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

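/* Suspend/resume: every active query is ended before a CS flush and begun
 * again in the new CS, so its begin/end packets never end up in different
 * submissions. The *_queries_suspend counters track the CS space that
 * resuming will consume. */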
static void r600_suspend_queries(struct r600_common_context *ctx,
				 struct list_head *query_list,
				 unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(*num_cs_dw_queries_suspend == 0);
}

void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
			     &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_suspend_timer_queries(struct r600_common_context *ctx)
{
	r600_suspend_queries(ctx, &ctx->active_timer_queries,
			     &ctx->num_cs_dw_timer_queries_suspend);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw * 2;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

static void r600_resume_queries(struct r600_common_context *ctx,
				struct list_head *query_list,
				unsigned *num_cs_dw_queries_suspend)
{
	struct r600_query *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);

	assert(*num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		r600_emit_query_begin(ctx, query);
	}
}

void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_nontimer_queries,
			    &ctx->num_cs_dw_nontimer_queries_suspend);
}

void r600_resume_timer_queries(struct r600_common_context *ctx)
{
	r600_resume_queries(ctx, &ctx->active_timer_queries,
			    &ctx->num_cs_dw_timer_queries_suspend);
}

/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

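/* Table of driver-specific queries exposed through get_driver_query_info
 * (used e.g. by the gallium HUD). X() fills in one pipe_driver_query_info
 * entry; a group_id of ~0 means the query belongs to no group. */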
#define X(name_, query_type_, type_, result_type_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = ~(unsigned)0 \
	}

static struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations",	NUM_COMPILATIONS,	UINT64, CUMULATIVE),
	X("num-shaders-created",NUM_SHADERS_CREATED,	UINT64, CUMULATIVE),
	X("draw-calls",		DRAW_CALLS,		UINT64, CUMULATIVE),
	X("requested-VRAM",	REQUESTED_VRAM,		BYTES, AVERAGE),
	X("requested-GTT",	REQUESTED_GTT,		BYTES, AVERAGE),
	X("buffer-wait-time",	BUFFER_WAIT_TIME,	MICROSECONDS, CUMULATIVE),
	X("num-cs-flushes",	NUM_CS_FLUSHES,		UINT64, CUMULATIVE),
	X("num-bytes-moved",	NUM_BYTES_MOVED,	BYTES, CUMULATIVE),
	X("VRAM-usage",		VRAM_USAGE,		BYTES, AVERAGE),
	X("GTT-usage",		GTT_USAGE,		BYTES, AVERAGE),
	X("GPU-load",		GPU_LOAD,		UINT64, AVERAGE),
	X("temperature",	GPU_TEMPERATURE,	UINT64, AVERAGE),
	X("shader-clock",	CURRENT_GPU_SCLK,	HZ, AVERAGE),
	X("memory-clock",	CURRENT_GPU_MCLK,	HZ, AVERAGE),
};

#undef X

static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	/* The last 3 entries (temperature and clocks) need radeon DRM 2.42;
	 * amdgpu (DRM major 3) doesn't expose them, and older radeon
	 * kernels additionally lack GPU-load. */
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return Elements(r600_driver_query_list);
	else if (rscreen->info.drm_major == 3)
		return Elements(r600_driver_query_list) - 3;
	else
		return Elements(r600_driver_query_list) - 4;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info)
		return num_queries;

	if (index >= num_queries)
		return 0;

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	}

	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_nontimer_queries);
	LIST_INITHEAD(&rctx->active_timer_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
}