radeonsi: don't use llvm.AMDIL.fraction for FRC and DFRAC
[mesa.git] / src/gallium/drivers/radeon/r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_cs.h"
#include "util/u_memory.h"


struct r600_query_buffer {
        /* The buffer where query results are stored. */
        struct r600_resource *buf;
        /* Offset of the next free result after current query data */
        unsigned results_end;
        /* If a query buffer is full, a new buffer is created and the old one
         * is put in here. When we calculate the result, we sum up the samples
         * from all buffers. */
        struct r600_query_buffer *previous;
};

struct r600_query {
        /* The query buffer and how many results are in it. */
        struct r600_query_buffer buffer;
        /* The type of query */
        unsigned type;
        /* Size of the result in memory for both begin_query and end_query,
         * this can be one or two numbers, or it could even be a size of a structure. */
        unsigned result_size;
        /* The number of dwords for begin_query or end_query. */
        unsigned num_cs_dw;
        /* linked list of queries */
        struct list_head list;
        /* for custom non-GPU queries */
        uint64_t begin_result;
        uint64_t end_result;
        /* Fence for GPU_FINISHED. */
        struct pipe_fence_handle *fence;
        /* For transform feedback: which stream the query is for */
        unsigned stream;
};

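/* Two kinds of queries share this struct: hardware queries, whose results
 * are written by the GPU into the buffer chain above, and the software
 * R600_QUERY_* counters, which only sample a value into begin_result and
 * end_result and never touch the GPU. */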

static bool r600_is_timer_query(unsigned type)
{
        return type == PIPE_QUERY_TIME_ELAPSED ||
               type == PIPE_QUERY_TIMESTAMP;
}

static bool r600_query_needs_begin(unsigned type)
{
        return type != PIPE_QUERY_GPU_FINISHED &&
               type != PIPE_QUERY_TIMESTAMP;
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
{
        unsigned j, i, num_results, buf_size = 4096;
        uint32_t *results;

        /* Non-GPU queries. */
        switch (type) {
        case PIPE_QUERY_TIMESTAMP_DISJOINT:
        case PIPE_QUERY_GPU_FINISHED:
        case R600_QUERY_DRAW_CALLS:
        case R600_QUERY_REQUESTED_VRAM:
        case R600_QUERY_REQUESTED_GTT:
        case R600_QUERY_BUFFER_WAIT_TIME:
        case R600_QUERY_NUM_CS_FLUSHES:
        case R600_QUERY_NUM_BYTES_MOVED:
        case R600_QUERY_VRAM_USAGE:
        case R600_QUERY_GTT_USAGE:
        case R600_QUERY_GPU_TEMPERATURE:
        case R600_QUERY_CURRENT_GPU_SCLK:
        case R600_QUERY_CURRENT_GPU_MCLK:
        case R600_QUERY_GPU_LOAD:
                return NULL;
        }

        /* Queries are normally read by the CPU after
         * being written by the GPU, hence staging is probably a good
         * usage pattern.
         */
        struct r600_resource *buf = (struct r600_resource*)
                pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
                                   PIPE_USAGE_STAGING, buf_size);

        switch (type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
                memset(results, 0, buf_size);

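                /* ZPASS_DONE result layout, as this code assumes it: for each
                 * result slot, every DB writes a {begin, end} pair of 64-bit
                 * sample counts, i.e. 16 bytes per DB:
                 *
                 *   dword 0-1: begin count for DB i
                 *   dword 2-3: end count for DB i
                 *
                 * Bit 63 of each value is set by the hardware once the value
                 * has been written. Disabled backends never write, so their
                 * top bits are preset below to make the readiness test in
                 * r600_query_read_result() pass. */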
                /* Set top bits for unused backends. */
                num_results = buf_size / (16 * ctx->max_db);
                for (j = 0; j < num_results; j++) {
                        for (i = 0; i < ctx->max_db; i++) {
                                if (!(ctx->backend_mask & (1<<i))) {
                                        results[(i * 4)+1] = 0x80000000;
                                        results[(i * 4)+3] = 0x80000000;
                                }
                        }
                        results += 4 * ctx->max_db;
                }
                break;
        case PIPE_QUERY_TIME_ELAPSED:
        case PIPE_QUERY_TIMESTAMP:
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
        case PIPE_QUERY_PIPELINE_STATISTICS:
                results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
                memset(results, 0, buf_size);
                break;
        default:
                assert(0);
        }
        return buf;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
                                              unsigned type, int diff)
{
        if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
            type == PIPE_QUERY_OCCLUSION_PREDICATE) {
                bool old_enable = rctx->num_occlusion_queries != 0;
                bool enable;

                rctx->num_occlusion_queries += diff;
                assert(rctx->num_occlusion_queries >= 0);

                enable = rctx->num_occlusion_queries != 0;

                if (enable != old_enable) {
                        rctx->set_occlusion_query_state(&rctx->b, enable);
                }
        }
}

static unsigned event_type_for_stream(struct r600_query *query)
{
        switch (query->stream) {
        default:
        case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
        case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
        case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
        case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
        }
}

static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
        uint64_t va;

        r600_update_occlusion_query_state(ctx, query->type, 1);
        r600_update_prims_generated_query_state(ctx, query->type, 1);
        ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);

        /* Get a new query buffer if needed. */
        if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
                struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
                *qbuf = query->buffer;
                query->buffer.buf = r600_new_query_buffer(ctx, query->type);
                query->buffer.results_end = 0;
                query->buffer.previous = qbuf;
        }

        /* emit begin query */
        va = query->buffer.buf->gpu_address + query->buffer.results_end;

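        /* Each EVENT_WRITE below splits the GPU virtual address into its low
         * 32 bits followed by the top 8 bits (a 40-bit address) in the next
         * dword. In the EOP variant, (3 << 29) is the DATA_SEL field, which
         * asks the CP to write the 64-bit GPU clock counter at the address. */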
        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32UL) & 0xFF);
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32UL) & 0xFF);
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
                radeon_emit(cs, va);
                radeon_emit(cs, (3 << 29) | ((va >> 32UL) & 0xFF));
                radeon_emit(cs, 0);
                radeon_emit(cs, 0);
                break;
        case PIPE_QUERY_PIPELINE_STATISTICS:
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32UL) & 0xFF);
                break;
        default:
                assert(0);
        }
        r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
                        RADEON_PRIO_MIN);

        if (!r600_is_timer_query(query->type)) {
                ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
        }
}

static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
        uint64_t va;

        /* The queries which need begin already called this in begin_query. */
        if (!r600_query_needs_begin(query->type)) {
                ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
        }

        va = query->buffer.buf->gpu_address;

        /* emit end query */
        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                va += query->buffer.results_end + 8;
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32UL) & 0xFF);
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                va += query->buffer.results_end + query->result_size/2;
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32UL) & 0xFF);
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                va += query->buffer.results_end + query->result_size/2;
                /* fall through */
        case PIPE_QUERY_TIMESTAMP:
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
                radeon_emit(cs, va);
                radeon_emit(cs, (3 << 29) | ((va >> 32UL) & 0xFF));
                radeon_emit(cs, 0);
                radeon_emit(cs, 0);
                break;
        case PIPE_QUERY_PIPELINE_STATISTICS:
                va += query->buffer.results_end + query->result_size/2;
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32UL) & 0xFF);
                break;
        default:
                assert(0);
        }
        r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
                        RADEON_PRIO_MIN);

        query->buffer.results_end += query->result_size;

        if (r600_query_needs_begin(query->type)) {
                if (!r600_is_timer_query(query->type)) {
                        ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
                }
        }

        r600_update_occlusion_query_state(ctx, query->type, -1);
        r600_update_prims_generated_query_state(ctx, query->type, -1);
}

static void r600_emit_query_predication(struct r600_common_context *ctx, struct r600_query *query,
                                        int operation, bool flag_wait)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
        uint32_t op = PRED_OP(operation);

        /* if true then invert, see GL_ARB_conditional_render_inverted */
        if (ctx->current_render_cond_cond)
                op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
        else
                op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */

        if (operation == PREDICATION_OP_CLEAR) {
                ctx->need_gfx_cs_space(&ctx->b, 3, FALSE);

                radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
                radeon_emit(cs, 0);
                radeon_emit(cs, PRED_OP(PREDICATION_OP_CLEAR));
        } else {
                struct r600_query_buffer *qbuf;
                unsigned count;
                /* Find how many results there are. */
                count = 0;
                for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
                        count += qbuf->results_end / query->result_size;
                }

                ctx->need_gfx_cs_space(&ctx->b, 5 * count, TRUE);

                op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

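                /* A query may span several buffers, so one SET_PREDICATION
                 * packet is emitted per result block; the CONTINUE bit set on
                 * every packet after the first makes the CP combine the
                 * outcome across all blocks instead of overwriting it. */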
                /* emit predicate packets for all data blocks */
                for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
                        unsigned results_base = 0;
                        uint64_t va = qbuf->buf->gpu_address;

                        while (results_base < qbuf->results_end) {
                                radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
                                radeon_emit(cs, (va + results_base) & 0xFFFFFFFFUL);
                                radeon_emit(cs, op | (((va + results_base) >> 32UL) & 0xFF));
                                r600_emit_reloc(ctx, &ctx->rings.gfx, qbuf->buf, RADEON_USAGE_READ,
                                                RADEON_PRIO_MIN);
                                results_base += query->result_size;

                                /* set CONTINUE bit for all packets except the first */
                                op |= PREDICATION_CONTINUE;
                        }
                }
        }
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query *query;
        bool skip_allocation = false;

        query = CALLOC_STRUCT(r600_query);
        if (query == NULL)
                return NULL;

        query->type = query_type;

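        /* For most types, result_size covers both the begin and the end
         * snapshot (TIMESTAMP is end-only, hence 8), and num_cs_dw is the
         * space needed to emit one snapshot. E.g. the occlusion queries
         * store a {begin, end} pair of 64-bit counts per DB (16 bytes each,
         * hence 16 * max_db), and one EVENT_WRITE takes 4 dwords plus 2 for
         * the relocation, hence 6; the EOP variant used by the timer
         * queries takes 6 + 2 = 8. */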
        switch (query_type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                query->result_size = 16 * rctx->max_db;
                query->num_cs_dw = 6;
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                query->result_size = 16;
                query->num_cs_dw = 8;
                break;
        case PIPE_QUERY_TIMESTAMP:
                query->result_size = 8;
                query->num_cs_dw = 8;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
                query->result_size = 32;
                query->num_cs_dw = 6;
                query->stream = index;
                break;
        case PIPE_QUERY_PIPELINE_STATISTICS:
                /* 11 values on EG, 8 on R600. */
                query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
                query->num_cs_dw = 6;
                break;
        /* Non-GPU queries and queries not requiring a buffer. */
        case PIPE_QUERY_TIMESTAMP_DISJOINT:
        case PIPE_QUERY_GPU_FINISHED:
        case R600_QUERY_DRAW_CALLS:
        case R600_QUERY_REQUESTED_VRAM:
        case R600_QUERY_REQUESTED_GTT:
        case R600_QUERY_BUFFER_WAIT_TIME:
        case R600_QUERY_NUM_CS_FLUSHES:
        case R600_QUERY_NUM_BYTES_MOVED:
        case R600_QUERY_VRAM_USAGE:
        case R600_QUERY_GTT_USAGE:
        case R600_QUERY_GPU_TEMPERATURE:
        case R600_QUERY_CURRENT_GPU_SCLK:
        case R600_QUERY_CURRENT_GPU_MCLK:
        case R600_QUERY_GPU_LOAD:
                skip_allocation = true;
                break;
        default:
                assert(0);
                FREE(query);
                return NULL;
        }

        if (!skip_allocation) {
                query->buffer.buf = r600_new_query_buffer(rctx, query_type);
                if (!query->buffer.buf) {
                        FREE(query);
                        return NULL;
                }
        }
        return (struct pipe_query*)query;
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
        struct r600_query *rquery = (struct r600_query*)query;
        struct r600_query_buffer *prev = rquery->buffer.previous;

        /* Release all query buffers. */
        while (prev) {
                struct r600_query_buffer *qbuf = prev;
                prev = prev->previous;
                pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
                FREE(qbuf);
        }

        pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
        FREE(query);
}

static boolean r600_begin_query(struct pipe_context *ctx,
                                struct pipe_query *query)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;
        struct r600_query_buffer *prev = rquery->buffer.previous;

        if (!r600_query_needs_begin(rquery->type)) {
                assert(0);
                return false;
        }

        /* Non-GPU queries. */
        switch (rquery->type) {
        case PIPE_QUERY_TIMESTAMP_DISJOINT:
                return true;
        case R600_QUERY_DRAW_CALLS:
                rquery->begin_result = rctx->num_draw_calls;
                return true;
        case R600_QUERY_REQUESTED_VRAM:
        case R600_QUERY_REQUESTED_GTT:
        case R600_QUERY_VRAM_USAGE:
        case R600_QUERY_GTT_USAGE:
        case R600_QUERY_GPU_TEMPERATURE:
        case R600_QUERY_CURRENT_GPU_SCLK:
        case R600_QUERY_CURRENT_GPU_MCLK:
                rquery->begin_result = 0;
                return true;
        case R600_QUERY_BUFFER_WAIT_TIME:
                rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS);
                return true;
        case R600_QUERY_NUM_CS_FLUSHES:
                rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
                return true;
        case R600_QUERY_NUM_BYTES_MOVED:
                rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
                return true;
        case R600_QUERY_GPU_LOAD:
                rquery->begin_result = r600_gpu_load_begin(rctx->screen);
                return true;
        }

        /* Discard the old query buffers. */
        while (prev) {
                struct r600_query_buffer *qbuf = prev;
                prev = prev->previous;
                pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
                FREE(qbuf);
        }

        /* Obtain a new buffer if the current one can't be mapped without a stall. */
        if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
            rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
                pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
                rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
        }

        rquery->buffer.results_end = 0;
        rquery->buffer.previous = NULL;

        r600_emit_query_begin(rctx, rquery);

        if (!r600_is_timer_query(rquery->type)) {
                LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
        }
        return true;
}

static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;

        /* Non-GPU queries. */
        switch (rquery->type) {
        case PIPE_QUERY_TIMESTAMP_DISJOINT:
                return;
        case PIPE_QUERY_GPU_FINISHED:
                rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC, &rquery->fence);
                return;
        case R600_QUERY_DRAW_CALLS:
                rquery->end_result = rctx->num_draw_calls;
                return;
        case R600_QUERY_REQUESTED_VRAM:
                rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
                return;
        case R600_QUERY_REQUESTED_GTT:
                rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
                return;
        case R600_QUERY_BUFFER_WAIT_TIME:
                rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS);
                return;
        case R600_QUERY_NUM_CS_FLUSHES:
                rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
                return;
        case R600_QUERY_NUM_BYTES_MOVED:
                rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
                return;
        case R600_QUERY_VRAM_USAGE:
                rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_VRAM_USAGE);
                return;
        case R600_QUERY_GTT_USAGE:
                rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GTT_USAGE);
                return;
        case R600_QUERY_GPU_TEMPERATURE:
                rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GPU_TEMPERATURE) / 1000;
                return;
        case R600_QUERY_CURRENT_GPU_SCLK:
                rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_SCLK) * 1000000;
                return;
        case R600_QUERY_CURRENT_GPU_MCLK:
                rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_MCLK) * 1000000;
                return;
        case R600_QUERY_GPU_LOAD:
                rquery->end_result = r600_gpu_load_end(rctx->screen, rquery->begin_result);
                return;
        }

        r600_emit_query_end(rctx, rquery);

        if (r600_query_needs_begin(rquery->type) && !r600_is_timer_query(rquery->type)) {
                LIST_DELINIT(&rquery->list);
        }
}

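/* Read one 64-bit {begin, end} pair from a mapped result buffer and return
 * end - begin. start_index and end_index are 32-bit dword indices; e.g. the
 * occlusion counter passes (0, 2): begin in dwords 0-1, end in dwords 2-3.
 * With test_status_bit, bit 63 of both values must be set (the hardware sets
 * it once the value has been written, see r600_new_query_buffer()),
 * otherwise the result is not ready yet and 0 is returned. */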
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
                                       bool test_status_bit)
{
        uint32_t *current_result = (uint32_t*)map;
        uint64_t start, end;

        start = (uint64_t)current_result[start_index] |
                (uint64_t)current_result[start_index+1] << 32;
        end = (uint64_t)current_result[end_index] |
              (uint64_t)current_result[end_index+1] << 32;

        if (!test_status_bit ||
            ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
                return end - start;
        }
        return 0;
}

static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
                                            struct r600_query *query,
                                            struct r600_query_buffer *qbuf,
                                            boolean wait,
                                            union pipe_query_result *result)
{
        struct pipe_screen *screen = ctx->b.screen;
        unsigned results_base = 0;
        char *map;

        /* Non-GPU queries. */
        switch (query->type) {
        case PIPE_QUERY_TIMESTAMP_DISJOINT:
                /* Convert from cycles per millisecond to cycles per second (Hz). */
                result->timestamp_disjoint.frequency =
                        (uint64_t)ctx->screen->info.r600_clock_crystal_freq * 1000;
                result->timestamp_disjoint.disjoint = FALSE;
                return TRUE;
        case PIPE_QUERY_GPU_FINISHED:
                result->b = screen->fence_finish(screen, query->fence,
                                                 wait ? PIPE_TIMEOUT_INFINITE : 0);
                return result->b;
        case R600_QUERY_DRAW_CALLS:
        case R600_QUERY_REQUESTED_VRAM:
        case R600_QUERY_REQUESTED_GTT:
        case R600_QUERY_BUFFER_WAIT_TIME:
        case R600_QUERY_NUM_CS_FLUSHES:
        case R600_QUERY_NUM_BYTES_MOVED:
        case R600_QUERY_VRAM_USAGE:
        case R600_QUERY_GTT_USAGE:
        case R600_QUERY_GPU_TEMPERATURE:
        case R600_QUERY_CURRENT_GPU_SCLK:
        case R600_QUERY_CURRENT_GPU_MCLK:
                result->u64 = query->end_result - query->begin_result;
                return TRUE;
        case R600_QUERY_GPU_LOAD:
                result->u64 = query->end_result;
                return TRUE;
        }

        map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
                                              PIPE_TRANSFER_READ |
                                              (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
        if (!map)
                return FALSE;

        /* count all results across all data blocks */
        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
                while (results_base != qbuf->results_end) {
                        result->u64 +=
                                r600_query_read_result(map + results_base, 0, 2, true);
                        results_base += 16;
                }
                break;
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                while (results_base != qbuf->results_end) {
                        result->b = result->b ||
                                r600_query_read_result(map + results_base, 0, 2, true) != 0;
                        results_base += 16;
                }
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                while (results_base != qbuf->results_end) {
                        result->u64 +=
                                r600_query_read_result(map + results_base, 0, 2, false);
                        results_base += query->result_size;
                }
                break;
        case PIPE_QUERY_TIMESTAMP:
        {
                uint32_t *current_result = (uint32_t*)map;
                result->u64 = (uint64_t)current_result[0] |
                              (uint64_t)current_result[1] << 32;
                break;
        }
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                /* SAMPLE_STREAMOUTSTATS stores this structure:
                 * {
                 *    u64 NumPrimitivesWritten;
                 *    u64 PrimitiveStorageNeeded;
                 * }
                 * We only need NumPrimitivesWritten here. */
                while (results_base != qbuf->results_end) {
                        result->u64 +=
                                r600_query_read_result(map + results_base, 2, 6, true);
                        results_base += query->result_size;
                }
                break;
        case PIPE_QUERY_PRIMITIVES_GENERATED:
                /* Here we read PrimitiveStorageNeeded. */
                while (results_base != qbuf->results_end) {
                        result->u64 +=
                                r600_query_read_result(map + results_base, 0, 4, true);
                        results_base += query->result_size;
                }
                break;
        case PIPE_QUERY_SO_STATISTICS:
                while (results_base != qbuf->results_end) {
                        result->so_statistics.num_primitives_written +=
                                r600_query_read_result(map + results_base, 2, 6, true);
                        result->so_statistics.primitives_storage_needed +=
                                r600_query_read_result(map + results_base, 0, 4, true);
                        results_base += query->result_size;
                }
                break;
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                while (results_base != qbuf->results_end) {
                        result->b = result->b ||
                                r600_query_read_result(map + results_base, 2, 6, true) !=
                                r600_query_read_result(map + results_base, 0, 4, true);
                        results_base += query->result_size;
                }
                break;
        case PIPE_QUERY_PIPELINE_STATISTICS:
                if (ctx->chip_class >= EVERGREEN) {
                        while (results_base != qbuf->results_end) {
                                result->pipeline_statistics.ps_invocations +=
                                        r600_query_read_result(map + results_base, 0, 22, false);
                                result->pipeline_statistics.c_primitives +=
                                        r600_query_read_result(map + results_base, 2, 24, false);
                                result->pipeline_statistics.c_invocations +=
                                        r600_query_read_result(map + results_base, 4, 26, false);
                                result->pipeline_statistics.vs_invocations +=
                                        r600_query_read_result(map + results_base, 6, 28, false);
                                result->pipeline_statistics.gs_invocations +=
                                        r600_query_read_result(map + results_base, 8, 30, false);
                                result->pipeline_statistics.gs_primitives +=
                                        r600_query_read_result(map + results_base, 10, 32, false);
                                result->pipeline_statistics.ia_primitives +=
                                        r600_query_read_result(map + results_base, 12, 34, false);
                                result->pipeline_statistics.ia_vertices +=
                                        r600_query_read_result(map + results_base, 14, 36, false);
                                result->pipeline_statistics.hs_invocations +=
                                        r600_query_read_result(map + results_base, 16, 38, false);
                                result->pipeline_statistics.ds_invocations +=
                                        r600_query_read_result(map + results_base, 18, 40, false);
                                result->pipeline_statistics.cs_invocations +=
                                        r600_query_read_result(map + results_base, 20, 42, false);
                                results_base += query->result_size;
                        }
                } else {
                        while (results_base != qbuf->results_end) {
                                result->pipeline_statistics.ps_invocations +=
                                        r600_query_read_result(map + results_base, 0, 16, false);
                                result->pipeline_statistics.c_primitives +=
                                        r600_query_read_result(map + results_base, 2, 18, false);
                                result->pipeline_statistics.c_invocations +=
                                        r600_query_read_result(map + results_base, 4, 20, false);
                                result->pipeline_statistics.vs_invocations +=
                                        r600_query_read_result(map + results_base, 6, 22, false);
                                result->pipeline_statistics.gs_invocations +=
                                        r600_query_read_result(map + results_base, 8, 24, false);
                                result->pipeline_statistics.gs_primitives +=
                                        r600_query_read_result(map + results_base, 10, 26, false);
                                result->pipeline_statistics.ia_primitives +=
                                        r600_query_read_result(map + results_base, 12, 28, false);
                                result->pipeline_statistics.ia_vertices +=
                                        r600_query_read_result(map + results_base, 14, 30, false);
                                results_base += query->result_size;
                        }
                }
#if 0 /* for testing */
                printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
                       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
                       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
                       result->pipeline_statistics.ia_vertices,
                       result->pipeline_statistics.ia_primitives,
                       result->pipeline_statistics.vs_invocations,
                       result->pipeline_statistics.hs_invocations,
                       result->pipeline_statistics.ds_invocations,
                       result->pipeline_statistics.gs_invocations,
                       result->pipeline_statistics.gs_primitives,
                       result->pipeline_statistics.c_invocations,
                       result->pipeline_statistics.c_primitives,
                       result->pipeline_statistics.ps_invocations,
                       result->pipeline_statistics.cs_invocations);
#endif
                break;
        default:
                assert(0);
        }

        return TRUE;
}

static boolean r600_get_query_result(struct pipe_context *ctx,
                                     struct pipe_query *query,
                                     boolean wait, union pipe_query_result *result)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;
        struct r600_query_buffer *qbuf;

        util_query_clear_result(result, rquery->type);

        for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
                if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
                        return FALSE;
                }
        }

        /* Convert the time to expected units. */
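        /* r600_clock_crystal_freq is in kHz (cycles per millisecond, see
         * above), so ticks * 10^6 / freq yields nanoseconds, which is what
         * gallium expects here; e.g. with a 27000 kHz crystal, 27 ticks map
         * to 1000 ns. */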
        if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
            rquery->type == PIPE_QUERY_TIMESTAMP) {
                result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
        }
        return TRUE;
}

static void r600_render_condition(struct pipe_context *ctx,
                                  struct pipe_query *query,
                                  boolean condition,
                                  uint mode)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;
        bool wait_flag = false;

        rctx->current_render_cond = query;
        rctx->current_render_cond_cond = condition;
        rctx->current_render_cond_mode = mode;

        if (query == NULL) {
                if (rctx->predicate_drawing) {
                        rctx->predicate_drawing = false;
                        r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
                }
                return;
        }

        if (mode == PIPE_RENDER_COND_WAIT ||
            mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
                wait_flag = true;
        }

        rctx->predicate_drawing = true;

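        /* ZPASS predicates on the visible sample counts written by the
         * occlusion queries; PRIMCOUNT predicates on streamout overflow,
         * i.e. NumPrimitivesWritten != PrimitiveStorageNeeded. */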
        switch (rquery->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
                break;
        default:
                assert(0);
        }
}

void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
        struct r600_query *query;

        LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
                r600_emit_query_end(ctx, query);
        }
        assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
}

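/* Nontimer queries are suspended around operations that would disturb their
 * hardware state, e.g. a CS flush: r600_suspend_nontimer_queries() ends every
 * active query and r600_resume_nontimer_queries() begins them again, so the
 * interval in between is simply not counted. */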
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx)
{
        struct r600_query *query;
        unsigned num_dw = 0;

        LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
                /* begin + end */
                num_dw += query->num_cs_dw * 2;

                /* Workaround for the fact that
                 * num_cs_dw_nontimer_queries_suspend is incremented for every
                 * resumed query, which raises the bar in need_cs_space for
                 * queries about to be resumed.
                 */
                num_dw += query->num_cs_dw;
        }
        /* primitives generated query */
        num_dw += ctx->streamout.enable_atom.num_dw;
        /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
        num_dw += 13;

        return num_dw;
}

void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
        struct r600_query *query;

        assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);

        /* Check CS space here. Resuming must not be interrupted by flushes. */
        ctx->need_gfx_cs_space(&ctx->b,
                               r600_queries_num_cs_dw_for_resuming(ctx), TRUE);

        LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
                r600_emit_query_begin(ctx, query);
        }
}

/* Get the backend mask. */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
        struct r600_resource *buffer;
        uint32_t *results;
        unsigned num_backends = ctx->screen->info.r600_num_backends;
        unsigned i, mask = 0;

        /* if the backend_map query is supported by the kernel */
        if (ctx->screen->info.r600_backend_map_valid) {
                unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
                unsigned backend_map = ctx->screen->info.r600_backend_map;
                unsigned item_width, item_mask;

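                /* backend_map packs one backend index per field: 4 bits wide
                 * on Evergreen, 2 bits on R600. E.g. on Evergreen, a map of
                 * 0x10 with two tile pipes decodes to indices 0 and 1,
                 * giving a mask of 0x3. */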
                if (ctx->chip_class >= EVERGREEN) {
                        item_width = 4;
                        item_mask = 0x7;
                } else {
                        item_width = 2;
                        item_mask = 0x3;
                }

                while (num_tile_pipes--) {
                        i = backend_map & item_mask;
                        mask |= (1<<i);
                        backend_map >>= item_width;
                }
                if (mask != 0) {
                        ctx->backend_mask = mask;
                        return;
                }
        }

        /* otherwise, use the fallback path for older kernels */

        /* create buffer for event data */
        buffer = (struct r600_resource*)
                pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
                                   PIPE_USAGE_STAGING, ctx->max_db*16);
        if (!buffer)
                goto err;

        /* initialize buffer with zeroes */
        results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
        if (results) {
                memset(results, 0, ctx->max_db * 4 * 4);

                /* emit EVENT_WRITE for ZPASS_DONE */
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
                radeon_emit(cs, buffer->gpu_address);
                radeon_emit(cs, buffer->gpu_address >> 32);

                r600_emit_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE, RADEON_PRIO_MIN);

                /* analyze results */
                results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
                if (results) {
                        for (i = 0; i < ctx->max_db; i++) {
                                /* at least the highest bit will be set if the backend is used */
                                if (results[i*4 + 1])
                                        mask |= (1<<i);
                        }
                }
        }

        pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

        if (mask != 0) {
                ctx->backend_mask = mask;
                return;
        }

err:
        /* fallback to old method - set num_backends lower bits to 1 */
        ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
        return;
}

void r600_query_init(struct r600_common_context *rctx)
{
        rctx->b.create_query = r600_create_query;
        rctx->b.destroy_query = r600_destroy_query;
        rctx->b.begin_query = r600_begin_query;
        rctx->b.end_query = r600_end_query;
        rctx->b.get_query_result = r600_get_query_result;

        if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
                rctx->b.render_condition = r600_render_condition;

        LIST_INITHEAD(&rctx->active_nontimer_queries);
}