[mesa.git] / src/gallium/drivers/radeon/r600_query.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_cs.h"
#include "util/u_memory.h"

struct r600_query_buffer {
	/* The buffer where query results are stored. */
	struct r600_resource *buf;
	/* Offset of the next free result after current query data. */
	unsigned results_end;
	/* If a query buffer is full, a new buffer is created and the old one
	 * is put in here. When we calculate the result, we sum up the samples
	 * from all buffers. */
	struct r600_query_buffer *previous;
};

struct r600_query {
	/* The query buffer and how many results are in it. */
	struct r600_query_buffer buffer;
	/* The type of query. */
	unsigned type;
	/* Size of the result in memory for both begin_query and end_query;
	 * this can be one or two numbers, or even the size of a structure. */
	unsigned result_size;
	/* The number of dwords for begin_query or end_query. */
	unsigned num_cs_dw;
	/* Linked list of queries. */
	struct list_head list;
	/* For custom non-GPU queries. */
	uint64_t begin_result;
	uint64_t end_result;
	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};

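/* Timer queries (time elapsed, timestamp) are not suspended/resumed around
 * context flushes; only nontimer queries are tracked on the active list. */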
static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP;
}

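/* GPU_FINISHED and TIMESTAMP are sampled only at end_query; they have no
 * begin-side counterpart. */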
static bool r600_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}

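/* Allocate a 4096-byte staging buffer for GPU query results, or return NULL
 * for query types that don't need one. Occlusion query buffers are
 * pre-initialized so that samples from disabled backends already look
 * complete (top bit set) with a zero result. */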
static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;

	/* Non-GPU queries. */
	switch (type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
		return NULL;
	}

	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		ctx->ws->buffer_unmap(buf->cs_buf);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
	case PIPE_QUERY_TIMESTAMP:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_PIPELINE_STATISTICS:
		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		ctx->ws->buffer_unmap(buf->cs_buf);
		break;
	default:
		assert(0);
	}
	return buf;
}

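/* Reference-count active occlusion queries and toggle the hardware's ZPASS
 * counting state when the count transitions between zero and nonzero. */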
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (enable != old_enable) {
			rctx->set_occlusion_query_state(&rctx->b, enable);
		}
	}
}

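/* Emit the begin-side sample of a query into its result buffer, switching to
 * a fresh buffer first if the current one is full. CS space is reserved here
 * for both the begin and end packets. */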
static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint64_t va;

	r600_update_occlusion_query_state(ctx, query->type, 1);
	r600_update_prims_generated_query_state(ctx, query->type, 1);
	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32UL) & 0xFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_MIN);

	if (!r600_is_timer_query(query->type)) {
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
	}
}

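/* Emit the end-side sample of a query. End-only queries (see
 * r600_query_needs_begin) reserve their CS space here; all others already
 * reserved it in r600_emit_query_begin. */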
static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (!r600_query_needs_begin(query->type)) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
	}

	va = query->buffer.buf->gpu_address;

	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (3 << 29) | ((va >> 32UL) & 0xFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		va += query->buffer.results_end + query->result_size/2;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32UL) & 0xFF);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_MIN);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type) &&
	    !r600_is_timer_query(query->type)) {
		ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
	}

	r600_update_occlusion_query_state(ctx, query->type, -1);
	r600_update_prims_generated_query_state(ctx, query->type, -1);
}

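/* Program predicated rendering: either clear the predicate, or emit one
 * SET_PREDICATION packet per result pair across all of the query's data
 * blocks, with the CONTINUE bit set on every packet after the first. */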
static void r600_emit_query_predication(struct r600_common_context *ctx, struct r600_query *query,
					int operation, bool flag_wait)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	if (operation == PREDICATION_OP_CLEAR) {
		ctx->need_gfx_cs_space(&ctx->b, 3, FALSE);

		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
		radeon_emit(cs, 0);
		radeon_emit(cs, PRED_OP(PREDICATION_OP_CLEAR));
	} else {
		struct r600_query_buffer *qbuf;
		unsigned count;
		uint32_t op;

		/* Find how many results there are. */
		count = 0;
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			count += qbuf->results_end / query->result_size;
		}

		ctx->need_gfx_cs_space(&ctx->b, 5 * count, TRUE);

		op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE |
		     (flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW);

		/* emit predicate packets for all data blocks */
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			unsigned results_base = 0;
			uint64_t va = qbuf->buf->gpu_address;

			while (results_base < qbuf->results_end) {
				radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
				radeon_emit(cs, (va + results_base) & 0xFFFFFFFFUL);
				radeon_emit(cs, op | (((va + results_base) >> 32UL) & 0xFF));
				r600_emit_reloc(ctx, &ctx->rings.gfx, qbuf->buf, RADEON_USAGE_READ,
						RADEON_PRIO_MIN);
				results_base += query->result_size;

				/* set CONTINUE bit for all packets except the first */
				op |= PREDICATION_CONTINUE;
			}
		}
	}
}

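/* pipe_context::create_query. Sets the per-type result size and the number
 * of CS dwords a begin/end emission needs, then allocates the first result
 * buffer for queries that are sampled by the GPU. */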
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *query;
	bool skip_allocation = false;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->num_cs_dw = 6;
		break;
	/* Non-GPU queries and queries not requiring a buffer. */
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_GPU_LOAD:
		skip_allocation = true;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!skip_allocation) {
		query->buffer.buf = r600_new_query_buffer(rctx, query_type);
		if (!query->buffer.buf) {
			FREE(query);
			return NULL;
		}
	}
	return (struct pipe_query*)query;
}

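/* Free a query along with its whole chain of result buffers. */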
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(query);
}

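/* pipe_context::begin_query. Non-GPU queries just snapshot the current
 * counter value; GPU queries discard old result buffers and emit the
 * begin-side sample. */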
static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return false;
	}

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return true;
	case R600_QUERY_DRAW_CALLS:
		rquery->begin_result = rctx->num_draw_calls;
		return true;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->begin_result = 0;
		return true;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS);
		return true;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return true;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return true;
	case R600_QUERY_GPU_LOAD:
		rquery->begin_result = r600_gpu_load_begin(rctx->screen);
		return true;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_emit_query_begin(rctx, rquery);

	if (!r600_is_timer_query(rquery->type)) {
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
	}
	return true;
}

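/* pipe_context::end_query. Non-GPU queries snapshot their end value; GPU
 * queries emit the end-side sample, and nontimer queries leave the active
 * list. */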
static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	/* Non-GPU queries. */
	switch (rquery->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		return;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC, &rquery->fence);
		return;
	case R600_QUERY_DRAW_CALLS:
		rquery->end_result = rctx->num_draw_calls;
		return;
	case R600_QUERY_REQUESTED_VRAM:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
		return;
	case R600_QUERY_REQUESTED_GTT:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
		return;
	case R600_QUERY_BUFFER_WAIT_TIME:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS);
		return;
	case R600_QUERY_NUM_CS_FLUSHES:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_CS_FLUSHES);
		return;
	case R600_QUERY_NUM_BYTES_MOVED:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_NUM_BYTES_MOVED);
		return;
	case R600_QUERY_VRAM_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_VRAM_USAGE);
		return;
	case R600_QUERY_GTT_USAGE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GTT_USAGE);
		return;
	case R600_QUERY_GPU_TEMPERATURE:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_GPU_TEMPERATURE) / 1000;
		return;
	case R600_QUERY_CURRENT_GPU_SCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_SCLK) * 1000000;
		return;
	case R600_QUERY_CURRENT_GPU_MCLK:
		rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_CURRENT_MCLK) * 1000000;
		return;
	case R600_QUERY_GPU_LOAD:
		rquery->end_result = r600_gpu_load_end(rctx->screen, rquery->begin_result);
		return;
	}

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type) && !r600_is_timer_query(rquery->type)) {
		LIST_DELINIT(&rquery->list);
	}
}

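/* Read one (start, end) pair of 64-bit counters from a mapped result buffer
 * and return end - start. If test_status_bit is set, the pair only counts
 * when the top bit of both values is set, i.e. when the GPU has actually
 * written both samples. */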
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

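/* Accumulate the results stored in one query buffer into *result. Non-GPU
 * queries are computed directly from the saved begin/end values and never
 * touch a buffer. */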
static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union pipe_query_result *result)
{
	struct pipe_screen *screen = ctx->b.screen;
	unsigned results_base = 0;
	char *map;

	/* Non-GPU queries. */
	switch (query->type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)ctx->screen->info.r600_clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = FALSE;
		return TRUE;
	case PIPE_QUERY_GPU_FINISHED:
		result->b = screen->fence_finish(screen, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	case R600_QUERY_DRAW_CALLS:
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_CS_FLUSHES:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 = query->end_result - query->begin_result;
		return TRUE;
	case R600_QUERY_GPU_LOAD:
		result->u64 = query->end_result;
		return TRUE;
	}

	map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
					      PIPE_TRANSFER_READ |
					      (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)map;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so_statistics.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so_statistics.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (ctx->chip_class >= EVERGREEN) {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 22, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 24, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 26, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 28, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 30, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 32, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 34, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 36, false);
				result->pipeline_statistics.hs_invocations +=
					r600_query_read_result(map + results_base, 16, 38, false);
				result->pipeline_statistics.ds_invocations +=
					r600_query_read_result(map + results_base, 18, 40, false);
				result->pipeline_statistics.cs_invocations +=
					r600_query_read_result(map + results_base, 20, 42, false);
				results_base += query->result_size;
			}
		} else {
			while (results_base != qbuf->results_end) {
				result->pipeline_statistics.ps_invocations +=
					r600_query_read_result(map + results_base, 0, 16, false);
				result->pipeline_statistics.c_primitives +=
					r600_query_read_result(map + results_base, 2, 18, false);
				result->pipeline_statistics.c_invocations +=
					r600_query_read_result(map + results_base, 4, 20, false);
				result->pipeline_statistics.vs_invocations +=
					r600_query_read_result(map + results_base, 6, 22, false);
				result->pipeline_statistics.gs_invocations +=
					r600_query_read_result(map + results_base, 8, 24, false);
				result->pipeline_statistics.gs_primitives +=
					r600_query_read_result(map + results_base, 10, 26, false);
				result->pipeline_statistics.ia_primitives +=
					r600_query_read_result(map + results_base, 12, 28, false);
				result->pipeline_statistics.ia_vertices +=
					r600_query_read_result(map + results_base, 14, 30, false);
				results_base += query->result_size;
			}
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}

	ctx->ws->buffer_unmap(qbuf->buf->cs_buf);
	return TRUE;
}

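/* pipe_context::get_query_result. Sums the partial results from every buffer
 * in the chain and converts GPU timestamps from clock ticks to nanoseconds. */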
static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query,
				     boolean wait, union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *qbuf;

	util_query_clear_result(result, rquery->type);

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
			return FALSE;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
	}
	return TRUE;
}

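/* pipe_context::render_condition. A NULL query disables predication;
 * otherwise drawing is predicated on the outcome of an occlusion or
 * streamout-overflow query. */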
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  uint mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	bool wait_flag = false;

	rctx->current_render_cond = query;
	rctx->current_render_cond_cond = condition;
	rctx->current_render_cond_mode = mode;

	if (query == NULL) {
		if (rctx->predicate_drawing) {
			rctx->predicate_drawing = false;
			r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
		}
		return;
	}

	if (mode == PIPE_RENDER_COND_WAIT ||
	    mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
		wait_flag = true;
	}

	rctx->predicate_drawing = true;

	switch (rquery->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
		break;
	default:
		assert(0);
	}
}

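/* Emit the end-side sample for every active nontimer query so the CS can be
 * flushed safely; r600_resume_nontimer_queries re-emits the begins in the
 * next CS. */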
void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx)
{
	struct r600_query *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		/* begin + end */
		num_dw += query->num_cs_dw * 2;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b,
			       r600_queries_num_cs_dw_for_resuming(ctx), TRUE);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_begin(ctx, query);
	}
}

/* Compute the mask of enabled DB backends. Uses the kernel's backend map if
 * available; otherwise samples a ZPASS_DONE event and checks which backends
 * wrote results. */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;

	/* if the backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise fall back to a path that works on older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->ws->buffer_unmap(buffer->cs_buf);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE, RADEON_PRIO_MIN);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
			ctx->ws->buffer_unmap(buffer->cs_buf);
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fall back to the old method: set the lowest num_backends bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;

	if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_nontimer_queries);
}