radeonsi: remove no-op 32-bit masking
mesa.git: src/gallium/drivers/radeon/r600_query.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "r600_cs.h"
26 #include "util/u_memory.h"
27
28
29 struct r600_query_buffer {
30 /* The buffer where query results are stored. */
31 struct r600_resource *buf;
32 /* Offset of the next free result after current query data */
33 unsigned results_end;
34 /* If a query buffer is full, a new buffer is created and the old one
35 * is put in here. When we calculate the result, we sum up the samples
36 * from all buffers. */
37 struct r600_query_buffer *previous;
38 };
39
40 struct r600_query {
41 /* The query buffer and how many results are in it. */
42 struct r600_query_buffer buffer;
43 /* The type of query */
44 unsigned type;
45 /* Size of the result in memory for both begin_query and end_query;
46 * it can be one or two numbers, or even the size of a structure. */
47 unsigned result_size;
48 /* The number of dwords for begin_query or end_query. */
49 unsigned num_cs_dw;
50 /* linked list of queries */
51 struct list_head list;
52 /* for custom non-GPU queries */
53 uint64_t begin_result;
54 uint64_t end_result;
55 /* Fence for GPU_FINISHED. */
56 struct pipe_fence_handle *fence;
57 /* For transform feedback: which stream the query is for */
58 unsigned stream;
59 };
60
61
62 static bool r600_is_timer_query(unsigned type)
63 {
64 return type == PIPE_QUERY_TIME_ELAPSED ||
65 type == PIPE_QUERY_TIMESTAMP;
66 }
67
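/* GPU_FINISHED and TIMESTAMP only sample a single value when the query
 * ends (a fence and an end-of-pipe timestamp, respectively), so no begin
 * packet is ever emitted for them. */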
68 static bool r600_query_needs_begin(unsigned type)
69 {
70 return type != PIPE_QUERY_GPU_FINISHED &&
71 type != PIPE_QUERY_TIMESTAMP;
72 }
73
74 static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
75 {
76 unsigned j, i, num_results, buf_size = 4096;
77 uint32_t *results;
78
79 /* Non-GPU queries. */
80 switch (type) {
81 case PIPE_QUERY_TIMESTAMP_DISJOINT:
82 case PIPE_QUERY_GPU_FINISHED:
83 case R600_QUERY_DRAW_CALLS:
84 case R600_QUERY_REQUESTED_VRAM:
85 case R600_QUERY_REQUESTED_GTT:
86 case R600_QUERY_BUFFER_WAIT_TIME:
87 case R600_QUERY_NUM_CS_FLUSHES:
88 case R600_QUERY_NUM_BYTES_MOVED:
89 case R600_QUERY_VRAM_USAGE:
90 case R600_QUERY_GTT_USAGE:
91 case R600_QUERY_GPU_TEMPERATURE:
92 case R600_QUERY_CURRENT_GPU_SCLK:
93 case R600_QUERY_CURRENT_GPU_MCLK:
94 case R600_QUERY_GPU_LOAD:
95 case R600_QUERY_NUM_COMPILATIONS:
96 case R600_QUERY_NUM_SHADERS_CREATED:
97 return NULL;
98 }
99
100 /* Queries are normally read by the CPU after
101 * being written by the GPU, hence staging is probably a good
102 * usage pattern.
103 */
104 struct r600_resource *buf = (struct r600_resource*)
105 pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
106 PIPE_USAGE_STAGING, buf_size);
107
108 switch (type) {
109 case PIPE_QUERY_OCCLUSION_COUNTER:
110 case PIPE_QUERY_OCCLUSION_PREDICATE:
111 results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
112 memset(results, 0, buf_size);
113
114 /* Set top bits for unused backends. */
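/* Each DB writes a {begin, end} pair of 64-bit counters, i.e. 16 bytes
 * per DB per sample.  Bit 63 of each counter acts as a "result written"
 * flag; pre-setting it for disabled backends makes r600_query_read_result
 * treat those slots as complete, so they contribute end - start = 0. */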
115 num_results = buf_size / (16 * ctx->max_db);
116 for (j = 0; j < num_results; j++) {
117 for (i = 0; i < ctx->max_db; i++) {
118 if (!(ctx->backend_mask & (1<<i))) {
119 results[(i * 4)+1] = 0x80000000;
120 results[(i * 4)+3] = 0x80000000;
121 }
122 }
123 results += 4 * ctx->max_db;
124 }
125 break;
126 case PIPE_QUERY_TIME_ELAPSED:
127 case PIPE_QUERY_TIMESTAMP:
128 break;
129 case PIPE_QUERY_PRIMITIVES_EMITTED:
130 case PIPE_QUERY_PRIMITIVES_GENERATED:
131 case PIPE_QUERY_SO_STATISTICS:
132 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
133 case PIPE_QUERY_PIPELINE_STATISTICS:
134 results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
135 memset(results, 0, buf_size);
136 break;
137 default:
138 assert(0);
139 }
140 return buf;
141 }
142
143 static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
144 unsigned type, int diff)
145 {
146 if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
147 type == PIPE_QUERY_OCCLUSION_PREDICATE) {
148 bool old_enable = rctx->num_occlusion_queries != 0;
149 bool enable;
150
151 rctx->num_occlusion_queries += diff;
152 assert(rctx->num_occlusion_queries >= 0);
153
154 enable = rctx->num_occlusion_queries != 0;
155
156 if (enable != old_enable) {
157 rctx->set_occlusion_query_state(&rctx->b, enable);
158 }
159 }
160 }
161
162 static unsigned event_type_for_stream(struct r600_query *query)
163 {
164 switch (query->stream) {
165 default:
166 case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
167 case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
168 case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
169 case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
170 }
171 }
172
173 static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
174 {
175 struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
176 uint64_t va;
177
178 r600_update_occlusion_query_state(ctx, query->type, 1);
179 r600_update_prims_generated_query_state(ctx, query->type, 1);
180 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);
181
182 /* Get a new query buffer if needed. */
183 if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
184 struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
185 *qbuf = query->buffer;
186 query->buffer.buf = r600_new_query_buffer(ctx, query->type);
187 query->buffer.results_end = 0;
188 query->buffer.previous = qbuf;
189 }
190
191 /* emit begin query */
192 va = query->buffer.buf->gpu_address + query->buffer.results_end;
193
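/* EVENT_WRITE makes the relevant block dump its counters at 'va'; only
 * part of the upper address dword fits in the packet, hence the
 * (va >> 32) & 0xFFFF.  EVENT_WRITE_EOP is used for timestamps:
 * DATA_SEL=3 (the 3 << 29) requests a 64-bit GPU clock value written at
 * end of pipe. */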
194 switch (query->type) {
195 case PIPE_QUERY_OCCLUSION_COUNTER:
196 case PIPE_QUERY_OCCLUSION_PREDICATE:
197 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
198 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
199 radeon_emit(cs, va);
200 radeon_emit(cs, (va >> 32) & 0xFFFF);
201 break;
202 case PIPE_QUERY_PRIMITIVES_EMITTED:
203 case PIPE_QUERY_PRIMITIVES_GENERATED:
204 case PIPE_QUERY_SO_STATISTICS:
205 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
206 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
207 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
208 radeon_emit(cs, va);
209 radeon_emit(cs, (va >> 32) & 0xFFFF);
210 break;
211 case PIPE_QUERY_TIME_ELAPSED:
212 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
213 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
214 radeon_emit(cs, va);
215 radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
216 radeon_emit(cs, 0);
217 radeon_emit(cs, 0);
218 break;
219 case PIPE_QUERY_PIPELINE_STATISTICS:
220 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
221 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
222 radeon_emit(cs, va);
223 radeon_emit(cs, (va >> 32) & 0xFFFF);
224 break;
225 default:
226 assert(0);
227 }
228 r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
229 RADEON_PRIO_MIN);
230
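/* Track how many CS dwords the matching end packets would need; the
 * CS-space checks use these counters so that suspending all active
 * queries at a flush never runs out of command-stream space. */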
231 if (r600_is_timer_query(query->type))
232 ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
233 else
234 ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
235 }
236
237 static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
238 {
239 struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
240 uint64_t va;
241
242 /* Queries that need a begin already reserved CS space for this in begin_query. */
243 if (!r600_query_needs_begin(query->type)) {
244 ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
245 }
246
247 va = query->buffer.buf->gpu_address;
248
249 /* emit end query */
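/* The end value lands in the second half of the sample slot: +8 for each
 * per-DB occlusion pair, +result_size/2 for the other query types.
 * r600_query_read_result later pairs begin and end at these offsets. */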
250 switch (query->type) {
251 case PIPE_QUERY_OCCLUSION_COUNTER:
252 case PIPE_QUERY_OCCLUSION_PREDICATE:
253 va += query->buffer.results_end + 8;
254 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
255 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
256 radeon_emit(cs, va);
257 radeon_emit(cs, (va >> 32) & 0xFFFF);
258 break;
259 case PIPE_QUERY_PRIMITIVES_EMITTED:
260 case PIPE_QUERY_PRIMITIVES_GENERATED:
261 case PIPE_QUERY_SO_STATISTICS:
262 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
263 va += query->buffer.results_end + query->result_size/2;
264 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
265 radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
266 radeon_emit(cs, va);
267 radeon_emit(cs, (va >> 32) & 0xFFFF);
268 break;
269 case PIPE_QUERY_TIME_ELAPSED:
270 va += query->buffer.results_end + query->result_size/2;
271 /* fall through */
272 case PIPE_QUERY_TIMESTAMP:
273 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
274 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
275 radeon_emit(cs, va);
276 radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
277 radeon_emit(cs, 0);
278 radeon_emit(cs, 0);
279 break;
280 case PIPE_QUERY_PIPELINE_STATISTICS:
281 va += query->buffer.results_end + query->result_size/2;
282 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
283 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
284 radeon_emit(cs, va);
285 radeon_emit(cs, (va >> 32) & 0xFFFF);
286 break;
287 default:
288 assert(0);
289 }
290 r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
291 RADEON_PRIO_MIN);
292
293 query->buffer.results_end += query->result_size;
294
295 if (r600_query_needs_begin(query->type)) {
296 if (r600_is_timer_query(query->type))
297 ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
298 else
299 ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
300 }
301
302 r600_update_occlusion_query_state(ctx, query->type, -1);
303 r600_update_prims_generated_query_state(ctx, query->type, -1);
304 }
305
306 static void r600_emit_query_predication(struct r600_common_context *ctx, struct r600_query *query,
307 int operation, bool flag_wait)
308 {
309 struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
310 uint32_t op = PRED_OP(operation);
311
312 /* If true, invert the condition; see GL_ARB_conditional_render_inverted. */
313 if (ctx->current_render_cond_cond)
314 op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible/overflow */
315 else
316 op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible/overflow */
317
318 if (operation == PREDICATION_OP_CLEAR) {
319 ctx->need_gfx_cs_space(&ctx->b, 3, FALSE);
320
321 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
322 radeon_emit(cs, 0);
323 radeon_emit(cs, PRED_OP(PREDICATION_OP_CLEAR));
324 } else {
325 struct r600_query_buffer *qbuf;
326 unsigned count;
327 /* Find how many results there are. */
328 count = 0;
329 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
330 count += qbuf->results_end / query->result_size;
331 }
332
333 ctx->need_gfx_cs_space(&ctx->b, 5 * count, TRUE);
334
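/* The WAIT hint makes the CP wait until the referenced results have been
 * written before evaluating the predicate; NOWAIT_DRAW lets rendering
 * proceed if they are not ready yet. */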
335 op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
336
337 /* emit predicate packets for all data blocks */
338 for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
339 unsigned results_base = 0;
340 uint64_t va = qbuf->buf->gpu_address;
341
342 while (results_base < qbuf->results_end) {
343 radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
344 radeon_emit(cs, va + results_base);
345 radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
346 r600_emit_reloc(ctx, &ctx->rings.gfx, qbuf->buf, RADEON_USAGE_READ,
347 RADEON_PRIO_MIN);
348 results_base += query->result_size;
349
350 /* set CONTINUE bit for all packets except the first */
351 op |= PREDICATION_CONTINUE;
352 }
353 }
354 }
355 }
356
357 static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
358 {
359 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
360 struct r600_query *query;
361 bool skip_allocation = false;
362
363 query = CALLOC_STRUCT(r600_query);
364 if (query == NULL)
365 return NULL;
366
367 query->type = query_type;
368
369 switch (query_type) {
370 case PIPE_QUERY_OCCLUSION_COUNTER:
371 case PIPE_QUERY_OCCLUSION_PREDICATE:
372 query->result_size = 16 * rctx->max_db;
373 query->num_cs_dw = 6;
374 break;
376 case PIPE_QUERY_TIME_ELAPSED:
377 query->result_size = 16;
378 query->num_cs_dw = 8;
379 break;
380 case PIPE_QUERY_TIMESTAMP:
381 query->result_size = 8;
382 query->num_cs_dw = 8;
383 break;
384 case PIPE_QUERY_PRIMITIVES_EMITTED:
385 case PIPE_QUERY_PRIMITIVES_GENERATED:
386 case PIPE_QUERY_SO_STATISTICS:
387 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
388 /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
389 query->result_size = 32;
390 query->num_cs_dw = 6;
391 query->stream = index;
392 break;
393 case PIPE_QUERY_PIPELINE_STATISTICS:
394 /* 11 values on EG, 8 on R600. */
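/* Each value is stored as a begin/end pair of 64-bit counters, i.e. 16 bytes. */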
395 query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
396 query->num_cs_dw = 6;
397 break;
398 /* Non-GPU queries and queries not requiring a buffer. */
399 case PIPE_QUERY_TIMESTAMP_DISJOINT:
400 case PIPE_QUERY_GPU_FINISHED:
401 case R600_QUERY_DRAW_CALLS:
402 case R600_QUERY_REQUESTED_VRAM:
403 case R600_QUERY_REQUESTED_GTT:
404 case R600_QUERY_BUFFER_WAIT_TIME:
405 case R600_QUERY_NUM_CS_FLUSHES:
406 case R600_QUERY_NUM_BYTES_MOVED:
407 case R600_QUERY_VRAM_USAGE:
408 case R600_QUERY_GTT_USAGE:
409 case R600_QUERY_GPU_TEMPERATURE:
410 case R600_QUERY_CURRENT_GPU_SCLK:
411 case R600_QUERY_CURRENT_GPU_MCLK:
412 case R600_QUERY_GPU_LOAD:
413 case R600_QUERY_NUM_COMPILATIONS:
414 case R600_QUERY_NUM_SHADERS_CREATED:
415 skip_allocation = true;
416 break;
417 default:
418 assert(0);
419 FREE(query);
420 return NULL;
421 }
422
423 if (!skip_allocation) {
424 query->buffer.buf = r600_new_query_buffer(rctx, query_type);
425 if (!query->buffer.buf) {
426 FREE(query);
427 return NULL;
428 }
429 }
430 return (struct pipe_query*)query;
431 }
432
433 static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
434 {
435 struct r600_query *rquery = (struct r600_query*)query;
436 struct r600_query_buffer *prev = rquery->buffer.previous;
437
438 /* Release all query buffers. */
439 while (prev) {
440 struct r600_query_buffer *qbuf = prev;
441 prev = prev->previous;
442 pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
443 FREE(qbuf);
444 }
445
446 pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
447 FREE(query);
448 }
449
450 static boolean r600_begin_query(struct pipe_context *ctx,
451 struct pipe_query *query)
452 {
453 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
454 struct r600_query *rquery = (struct r600_query *)query;
455 struct r600_query_buffer *prev = rquery->buffer.previous;
456
457 if (!r600_query_needs_begin(rquery->type)) {
458 assert(0);
459 return false;
460 }
461
462 /* Non-GPU queries. */
463 switch (rquery->type) {
464 case PIPE_QUERY_TIMESTAMP_DISJOINT:
465 return true;
466 case R600_QUERY_DRAW_CALLS:
467 rquery->begin_result = rctx->num_draw_calls;
468 return true;
469 case R600_QUERY_REQUESTED_VRAM:
470 case R600_QUERY_REQUESTED_GTT:
471 case R600_QUERY_VRAM_USAGE:
472 case R600_QUERY_GTT_USAGE:
473 case R600_QUERY_GPU_TEMPERATURE:
474 case R600_QUERY_CURRENT_GPU_SCLK:
475 case R600_QUERY_CURRENT_GPU_MCLK:
476 rquery->begin_result = 0;
477 return true;
478 case R600_QUERY_BUFFER_WAIT_TIME:
479 rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
480 return true;
481 case R600_QUERY_NUM_CS_FLUSHES:
482 rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
483 return true;
484 case R600_QUERY_NUM_BYTES_MOVED:
485 rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
486 return true;
487 case R600_QUERY_GPU_LOAD:
488 rquery->begin_result = r600_gpu_load_begin(rctx->screen);
489 return true;
490 case R600_QUERY_NUM_COMPILATIONS:
491 rquery->begin_result = p_atomic_read(&rctx->screen->num_compilations);
492 return true;
493 case R600_QUERY_NUM_SHADERS_CREATED:
494 rquery->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
495 return true;
496 }
497
498 /* Discard the old query buffers. */
499 while (prev) {
500 struct r600_query_buffer *qbuf = prev;
501 prev = prev->previous;
502 pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
503 FREE(qbuf);
504 }
505
506 /* Obtain a new buffer if the current one can't be mapped without a stall. */
507 if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
508 !rctx->ws->buffer_wait(rquery->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
509 pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
510 rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
511 }
512
513 rquery->buffer.results_end = 0;
514 rquery->buffer.previous = NULL;
515
516 r600_emit_query_begin(rctx, rquery);
517
518 if (r600_is_timer_query(rquery->type))
519 LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
520 else
521 LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
522 return true;
523 }
524
525 static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
526 {
527 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
528 struct r600_query *rquery = (struct r600_query *)query;
529
530 /* Non-GPU queries. */
531 switch (rquery->type) {
532 case PIPE_QUERY_TIMESTAMP_DISJOINT:
533 return;
534 case PIPE_QUERY_GPU_FINISHED:
535 rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC, &rquery->fence);
536 return;
537 case R600_QUERY_DRAW_CALLS:
538 rquery->end_result = rctx->num_draw_calls;
539 return;
540 case R600_QUERY_REQUESTED_VRAM:
541 rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
542 return;
543 case R600_QUERY_REQUESTED_GTT:
544 rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
545 return;
546 case R600_QUERY_BUFFER_WAIT_TIME:
547 rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS) / 1000;
548 return;
549 case R600_QUERY_NUM_CS_FLUSHES:
550 rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
551 return;
552 case R600_QUERY_NUM_BYTES_MOVED:
553 rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
554 return;
555 case R600_QUERY_VRAM_USAGE:
556 rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_VRAM_USAGE);
557 return;
558 case R600_QUERY_GTT_USAGE:
559 rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GTT_USAGE);
560 return;
561 case R600_QUERY_GPU_TEMPERATURE:
562 rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GPU_TEMPERATURE) / 1000;
563 return;
564 case R600_QUERY_CURRENT_GPU_SCLK:
565 rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_SCLK) * 1000000;
566 return;
567 case R600_QUERY_CURRENT_GPU_MCLK:
568 rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_MCLK) * 1000000;
569 return;
570 case R600_QUERY_GPU_LOAD:
571 rquery->end_result = r600_gpu_load_end(rctx->screen, rquery->begin_result);
572 return;
573 case R600_QUERY_NUM_COMPILATIONS:
574 rquery->end_result = p_atomic_read(&rctx->screen->num_compilations);
575 return;
576 case R600_QUERY_NUM_SHADERS_CREATED:
577 rquery->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
578 return;
579 }
580
581 r600_emit_query_end(rctx, rquery);
582
583 if (r600_query_needs_begin(rquery->type))
584 LIST_DELINIT(&rquery->list);
585 }
586
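/* Read one begin/end pair from a mapped result buffer.  Each value is a
 * 64-bit counter at the given dword index; if test_status_bit is set,
 * bit 63 (the "result written" flag) must be set in both values,
 * otherwise the sample is ignored and 0 is returned. */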
587 static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
588 bool test_status_bit)
589 {
590 uint32_t *current_result = (uint32_t*)map;
591 uint64_t start, end;
592
593 start = (uint64_t)current_result[start_index] |
594 (uint64_t)current_result[start_index+1] << 32;
595 end = (uint64_t)current_result[end_index] |
596 (uint64_t)current_result[end_index+1] << 32;
597
598 if (!test_status_bit ||
599 ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
600 return end - start;
601 }
602 return 0;
603 }
604
605 static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
606 struct r600_query *query,
607 struct r600_query_buffer *qbuf,
608 boolean wait,
609 union pipe_query_result *result)
610 {
611 struct pipe_screen *screen = ctx->b.screen;
612 unsigned results_base = 0;
613 char *map;
614
615 /* Non-GPU queries. */
616 switch (query->type) {
617 case PIPE_QUERY_TIMESTAMP_DISJOINT:
618 /* Convert from cycles per millisecond to cycles per second (Hz). */
619 result->timestamp_disjoint.frequency =
620 (uint64_t)ctx->screen->info.r600_clock_crystal_freq * 1000;
621 result->timestamp_disjoint.disjoint = FALSE;
622 return TRUE;
623 case PIPE_QUERY_GPU_FINISHED:
624 result->b = screen->fence_finish(screen, query->fence,
625 wait ? PIPE_TIMEOUT_INFINITE : 0);
626 return result->b;
627 case R600_QUERY_DRAW_CALLS:
628 case R600_QUERY_REQUESTED_VRAM:
629 case R600_QUERY_REQUESTED_GTT:
630 case R600_QUERY_BUFFER_WAIT_TIME:
631 case R600_QUERY_NUM_CS_FLUSHES:
632 case R600_QUERY_NUM_BYTES_MOVED:
633 case R600_QUERY_VRAM_USAGE:
634 case R600_QUERY_GTT_USAGE:
635 case R600_QUERY_GPU_TEMPERATURE:
636 case R600_QUERY_CURRENT_GPU_SCLK:
637 case R600_QUERY_CURRENT_GPU_MCLK:
638 case R600_QUERY_NUM_COMPILATIONS:
639 case R600_QUERY_NUM_SHADERS_CREATED:
640 result->u64 = query->end_result - query->begin_result;
641 return TRUE;
642 case R600_QUERY_GPU_LOAD:
643 result->u64 = query->end_result;
644 return TRUE;
645 }
646
647 map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
648 PIPE_TRANSFER_READ |
649 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
650 if (!map)
651 return FALSE;
652
653 /* count all results across all data blocks */
654 switch (query->type) {
655 case PIPE_QUERY_OCCLUSION_COUNTER:
656 while (results_base != qbuf->results_end) {
657 result->u64 +=
658 r600_query_read_result(map + results_base, 0, 2, true);
659 results_base += 16;
660 }
661 break;
662 case PIPE_QUERY_OCCLUSION_PREDICATE:
663 while (results_base != qbuf->results_end) {
664 result->b = result->b ||
665 r600_query_read_result(map + results_base, 0, 2, true) != 0;
666 results_base += 16;
667 }
668 break;
669 case PIPE_QUERY_TIME_ELAPSED:
670 while (results_base != qbuf->results_end) {
671 result->u64 +=
672 r600_query_read_result(map + results_base, 0, 2, false);
673 results_base += query->result_size;
674 }
675 break;
676 case PIPE_QUERY_TIMESTAMP:
677 {
678 uint32_t *current_result = (uint32_t*)map;
679 result->u64 = (uint64_t)current_result[0] |
680 (uint64_t)current_result[1] << 32;
681 break;
682 }
683 case PIPE_QUERY_PRIMITIVES_EMITTED:
684 /* SAMPLE_STREAMOUTSTATS stores this structure:
685 * {
686 * u64 NumPrimitivesWritten;
687 * u64 PrimitiveStorageNeeded;
688 * }
689 * We only need NumPrimitivesWritten here. */
690 while (results_base != qbuf->results_end) {
691 result->u64 +=
692 r600_query_read_result(map + results_base, 2, 6, true);
693 results_base += query->result_size;
694 }
695 break;
696 case PIPE_QUERY_PRIMITIVES_GENERATED:
697 /* Here we read PrimitiveStorageNeeded. */
698 while (results_base != qbuf->results_end) {
699 result->u64 +=
700 r600_query_read_result(map + results_base, 0, 4, true);
701 results_base += query->result_size;
702 }
703 break;
704 case PIPE_QUERY_SO_STATISTICS:
705 while (results_base != qbuf->results_end) {
706 result->so_statistics.num_primitives_written +=
707 r600_query_read_result(map + results_base, 2, 6, true);
708 result->so_statistics.primitives_storage_needed +=
709 r600_query_read_result(map + results_base, 0, 4, true);
710 results_base += query->result_size;
711 }
712 break;
713 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
714 while (results_base != qbuf->results_end) {
715 result->b = result->b ||
716 r600_query_read_result(map + results_base, 2, 6, true) !=
717 r600_query_read_result(map + results_base, 0, 4, true);
718 results_base += query->result_size;
719 }
720 break;
721 case PIPE_QUERY_PIPELINE_STATISTICS:
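/* Begin values start at dword 0 and end values at dword 22 (Evergreen,
 * 11 counters) or dword 16 (R600, 8 counters); see the result_size
 * computation in r600_create_query. */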
722 if (ctx->chip_class >= EVERGREEN) {
723 while (results_base != qbuf->results_end) {
724 result->pipeline_statistics.ps_invocations +=
725 r600_query_read_result(map + results_base, 0, 22, false);
726 result->pipeline_statistics.c_primitives +=
727 r600_query_read_result(map + results_base, 2, 24, false);
728 result->pipeline_statistics.c_invocations +=
729 r600_query_read_result(map + results_base, 4, 26, false);
730 result->pipeline_statistics.vs_invocations +=
731 r600_query_read_result(map + results_base, 6, 28, false);
732 result->pipeline_statistics.gs_invocations +=
733 r600_query_read_result(map + results_base, 8, 30, false);
734 result->pipeline_statistics.gs_primitives +=
735 r600_query_read_result(map + results_base, 10, 32, false);
736 result->pipeline_statistics.ia_primitives +=
737 r600_query_read_result(map + results_base, 12, 34, false);
738 result->pipeline_statistics.ia_vertices +=
739 r600_query_read_result(map + results_base, 14, 36, false);
740 result->pipeline_statistics.hs_invocations +=
741 r600_query_read_result(map + results_base, 16, 38, false);
742 result->pipeline_statistics.ds_invocations +=
743 r600_query_read_result(map + results_base, 18, 40, false);
744 result->pipeline_statistics.cs_invocations +=
745 r600_query_read_result(map + results_base, 20, 42, false);
746 results_base += query->result_size;
747 }
748 } else {
749 while (results_base != qbuf->results_end) {
750 result->pipeline_statistics.ps_invocations +=
751 r600_query_read_result(map + results_base, 0, 16, false);
752 result->pipeline_statistics.c_primitives +=
753 r600_query_read_result(map + results_base, 2, 18, false);
754 result->pipeline_statistics.c_invocations +=
755 r600_query_read_result(map + results_base, 4, 20, false);
756 result->pipeline_statistics.vs_invocations +=
757 r600_query_read_result(map + results_base, 6, 22, false);
758 result->pipeline_statistics.gs_invocations +=
759 r600_query_read_result(map + results_base, 8, 24, false);
760 result->pipeline_statistics.gs_primitives +=
761 r600_query_read_result(map + results_base, 10, 26, false);
762 result->pipeline_statistics.ia_primitives +=
763 r600_query_read_result(map + results_base, 12, 28, false);
764 result->pipeline_statistics.ia_vertices +=
765 r600_query_read_result(map + results_base, 14, 30, false);
766 results_base += query->result_size;
767 }
768 }
769 #if 0 /* for testing */
770 printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
771 "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
772 "Clipper prims=%llu, PS=%llu, CS=%llu\n",
773 result->pipeline_statistics.ia_vertices,
774 result->pipeline_statistics.ia_primitives,
775 result->pipeline_statistics.vs_invocations,
776 result->pipeline_statistics.hs_invocations,
777 result->pipeline_statistics.ds_invocations,
778 result->pipeline_statistics.gs_invocations,
779 result->pipeline_statistics.gs_primitives,
780 result->pipeline_statistics.c_invocations,
781 result->pipeline_statistics.c_primitives,
782 result->pipeline_statistics.ps_invocations,
783 result->pipeline_statistics.cs_invocations);
784 #endif
785 break;
786 default:
787 assert(0);
788 }
789
790 return TRUE;
791 }
792
793 static boolean r600_get_query_result(struct pipe_context *ctx,
794 struct pipe_query *query,
795 boolean wait, union pipe_query_result *result)
796 {
797 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
798 struct r600_query *rquery = (struct r600_query *)query;
799 struct r600_query_buffer *qbuf;
800
801 util_query_clear_result(result, rquery->type);
802
803 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
804 if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
805 return FALSE;
806 }
807 }
808
809 /* Convert the time to expected units. */
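/* r600_clock_crystal_freq is in kHz (GPU clock ticks per millisecond),
 * so this yields nanoseconds, which is what these query types report. */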
810 if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
811 rquery->type == PIPE_QUERY_TIMESTAMP) {
812 result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
813 }
814 return TRUE;
815 }
816
817 static void r600_render_condition(struct pipe_context *ctx,
818 struct pipe_query *query,
819 boolean condition,
820 uint mode)
821 {
822 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
823 struct r600_query *rquery = (struct r600_query *)query;
824 bool wait_flag = false;
825
826 rctx->current_render_cond = query;
827 rctx->current_render_cond_cond = condition;
828 rctx->current_render_cond_mode = mode;
829
830 if (query == NULL) {
831 if (rctx->predicate_drawing) {
832 rctx->predicate_drawing = false;
833 r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
834 }
835 return;
836 }
837
838 if (mode == PIPE_RENDER_COND_WAIT ||
839 mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
840 wait_flag = true;
841 }
842
843 rctx->predicate_drawing = true;
844
845 switch (rquery->type) {
846 case PIPE_QUERY_OCCLUSION_COUNTER:
847 case PIPE_QUERY_OCCLUSION_PREDICATE:
848 r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
849 break;
850 case PIPE_QUERY_PRIMITIVES_EMITTED:
851 case PIPE_QUERY_PRIMITIVES_GENERATED:
852 case PIPE_QUERY_SO_STATISTICS:
853 case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
854 r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
855 break;
856 default:
857 assert(0);
858 }
859 }
860
861 static void r600_suspend_queries(struct r600_common_context *ctx,
862 struct list_head *query_list,
863 unsigned *num_cs_dw_queries_suspend)
864 {
865 struct r600_query *query;
866
867 LIST_FOR_EACH_ENTRY(query, query_list, list) {
868 r600_emit_query_end(ctx, query);
869 }
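/* r600_emit_query_end decrements the suspend counter for every query
 * that had a begin, so it must have dropped back to zero here. */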
870 assert(*num_cs_dw_queries_suspend == 0);
871 }
872
873 void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
874 {
875 r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
876 &ctx->num_cs_dw_nontimer_queries_suspend);
877 }
878
879 void r600_suspend_timer_queries(struct r600_common_context *ctx)
880 {
881 r600_suspend_queries(ctx, &ctx->active_timer_queries,
882 &ctx->num_cs_dw_timer_queries_suspend);
883 }
884
885 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
886 struct list_head *query_list)
887 {
888 struct r600_query *query;
889 unsigned num_dw = 0;
890
891 LIST_FOR_EACH_ENTRY(query, query_list, list) {
892 /* begin + end */
893 num_dw += query->num_cs_dw * 2;
894
895 /* Workaround for the fact that
896 * num_cs_dw_nontimer_queries_suspend is incremented for every
897 * resumed query, which raises the bar in need_cs_space for
898 * queries about to be resumed.
899 */
900 num_dw += query->num_cs_dw;
901 }
902 /* primitives generated query */
903 num_dw += ctx->streamout.enable_atom.num_dw;
904 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
905 num_dw += 13;
906
907 return num_dw;
908 }
909
910 static void r600_resume_queries(struct r600_common_context *ctx,
911 struct list_head *query_list,
912 unsigned *num_cs_dw_queries_suspend)
913 {
914 struct r600_query *query;
915 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);
916
917 assert(*num_cs_dw_queries_suspend == 0);
918
919 /* Check CS space here. Resuming must not be interrupted by flushes. */
920 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);
921
922 LIST_FOR_EACH_ENTRY(query, query_list, list) {
923 r600_emit_query_begin(ctx, query);
924 }
925 }
926
927 void r600_resume_nontimer_queries(struct r600_common_context *ctx)
928 {
929 r600_resume_queries(ctx, &ctx->active_nontimer_queries,
930 &ctx->num_cs_dw_nontimer_queries_suspend);
931 }
932
933 void r600_resume_timer_queries(struct r600_common_context *ctx)
934 {
935 r600_resume_queries(ctx, &ctx->active_timer_queries,
936 &ctx->num_cs_dw_timer_queries_suspend);
937 }
938
939 /* Get the backend mask. */
940 void r600_query_init_backend_mask(struct r600_common_context *ctx)
941 {
942 struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
943 struct r600_resource *buffer;
944 uint32_t *results;
945 unsigned num_backends = ctx->screen->info.r600_num_backends;
946 unsigned i, mask = 0;
947
948 /* if backend_map query is supported by the kernel */
949 if (ctx->screen->info.r600_backend_map_valid) {
950 unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
951 unsigned backend_map = ctx->screen->info.r600_backend_map;
952 unsigned item_width, item_mask;
953
954 if (ctx->chip_class >= EVERGREEN) {
955 item_width = 4;
956 item_mask = 0x7;
957 } else {
958 item_width = 2;
959 item_mask = 0x3;
960 }
961
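/* backend_map packs one backend index per tile pipe, item_width bits
 * each; decode it into a bitmask of used backends. */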
962 while(num_tile_pipes--) {
963 i = backend_map & item_mask;
964 mask |= (1<<i);
965 backend_map >>= item_width;
966 }
967 if (mask != 0) {
968 ctx->backend_mask = mask;
969 return;
970 }
971 }
972
973 /* otherwise, use the backup path for older kernels */
974
975 /* create buffer for event data */
976 buffer = (struct r600_resource*)
977 pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
978 PIPE_USAGE_STAGING, ctx->max_db*16);
979 if (!buffer)
980 goto err;
981
982 /* initialize buffer with zeroes */
983 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
984 if (results) {
985 memset(results, 0, ctx->max_db * 4 * 4);
986
987 /* emit EVENT_WRITE for ZPASS_DONE */
988 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
989 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
990 radeon_emit(cs, buffer->gpu_address);
991 radeon_emit(cs, buffer->gpu_address >> 32);
992
993 r600_emit_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE, RADEON_PRIO_MIN);
994
995 /* analyze results */
996 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
997 if (results) {
998 for(i = 0; i < ctx->max_db; i++) {
999 /* at least the highest bit will be set if the backend is used */
1000 if (results[i*4 + 1])
1001 mask |= (1<<i);
1002 }
1003 }
1004 }
1005
1006 pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
1007
1008 if (mask != 0) {
1009 ctx->backend_mask = mask;
1010 return;
1011 }
1012
1013 err:
1014 /* fall back to the old method: set the lowest num_backends bits to 1 */
1015 ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
1016 return;
1017 }
1018
1019 void r600_query_init(struct r600_common_context *rctx)
1020 {
1021 rctx->b.create_query = r600_create_query;
1022 rctx->b.destroy_query = r600_destroy_query;
1023 rctx->b.begin_query = r600_begin_query;
1024 rctx->b.end_query = r600_end_query;
1025 rctx->b.get_query_result = r600_get_query_result;
1026
1027 if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
1028 rctx->b.render_condition = r600_render_condition;
1029
1030 LIST_INITHEAD(&rctx->active_nontimer_queries);
1031 LIST_INITHEAD(&rctx->active_timer_queries);
1032 }