/* mesa: src/gallium/drivers/radeon/r600_query.c */
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include "tgsi/tgsi_text.h"

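/* The hardware exposes four streamout streams; the SAMPLE_STREAMOUTSTATS
 * events below sample one counter pair per stream.
 */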
#define R600_MAX_STREAMS 4

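/* Byte layout of one begin/end result pair inside a query buffer, as consumed
 * by the result-collection compute shader. Offsets are relative to the start
 * of a single result; filled in by r600_get_hw_query_params() below.
 */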
struct r600_hw_query_params {
    unsigned start_offset;
    unsigned end_offset;
    unsigned fence_offset;
    unsigned pair_stride;
    unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
    struct r600_query b;

    uint64_t begin_result;
    uint64_t end_result;

    uint64_t begin_time;
    uint64_t end_time;

    /* Fence for GPU_FINISHED. */
    struct pipe_fence_handle *fence;
};

static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
                                  struct r600_query *rquery)
{
    struct r600_query_sw *query = (struct r600_query_sw *)rquery;

    rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
    FREE(query);
}

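/* Map driver-specific query types onto the corresponding winsys counters. */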
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
    switch (type) {
    case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
    case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
    case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
    case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
    case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
    case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
    case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
    case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
    case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
    case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
    case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
    case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
    case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
    case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
    case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
    case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
    case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
    case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
    case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
    default: unreachable("query type does not correspond to winsys id");
    }
}

static bool r600_query_sw_begin(struct r600_common_context *rctx,
                                struct r600_query *rquery)
{
    struct r600_query_sw *query = (struct r600_query_sw *)rquery;
    enum radeon_value_id ws_id;

    switch (query->b.type) {
    case PIPE_QUERY_TIMESTAMP_DISJOINT:
    case PIPE_QUERY_GPU_FINISHED:
        break;
    case R600_QUERY_DRAW_CALLS:
        query->begin_result = rctx->num_draw_calls;
        break;
    case R600_QUERY_DECOMPRESS_CALLS:
        query->begin_result = rctx->num_decompress_calls;
        break;
    case R600_QUERY_MRT_DRAW_CALLS:
        query->begin_result = rctx->num_mrt_draw_calls;
        break;
    case R600_QUERY_PRIM_RESTART_CALLS:
        query->begin_result = rctx->num_prim_restart_calls;
        break;
    case R600_QUERY_SPILL_DRAW_CALLS:
        query->begin_result = rctx->num_spill_draw_calls;
        break;
    case R600_QUERY_COMPUTE_CALLS:
        query->begin_result = rctx->num_compute_calls;
        break;
    case R600_QUERY_SPILL_COMPUTE_CALLS:
        query->begin_result = rctx->num_spill_compute_calls;
        break;
    case R600_QUERY_DMA_CALLS:
        query->begin_result = rctx->num_dma_calls;
        break;
    case R600_QUERY_CP_DMA_CALLS:
        query->begin_result = rctx->num_cp_dma_calls;
        break;
    case R600_QUERY_NUM_VS_FLUSHES:
        query->begin_result = rctx->num_vs_flushes;
        break;
    case R600_QUERY_NUM_PS_FLUSHES:
        query->begin_result = rctx->num_ps_flushes;
        break;
    case R600_QUERY_NUM_CS_FLUSHES:
        query->begin_result = rctx->num_cs_flushes;
        break;
    case R600_QUERY_NUM_CB_CACHE_FLUSHES:
        query->begin_result = rctx->num_cb_cache_flushes;
        break;
    case R600_QUERY_NUM_DB_CACHE_FLUSHES:
        query->begin_result = rctx->num_db_cache_flushes;
        break;
    case R600_QUERY_NUM_L2_INVALIDATES:
        query->begin_result = rctx->num_L2_invalidates;
        break;
    case R600_QUERY_NUM_L2_WRITEBACKS:
        query->begin_result = rctx->num_L2_writebacks;
        break;
    case R600_QUERY_NUM_RESIDENT_HANDLES:
        query->begin_result = rctx->num_resident_handles;
        break;
    case R600_QUERY_TC_OFFLOADED_SLOTS:
        query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
        break;
    case R600_QUERY_TC_DIRECT_SLOTS:
        query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
        break;
    case R600_QUERY_TC_NUM_SYNCS:
        query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
        break;
    case R600_QUERY_REQUESTED_VRAM:
    case R600_QUERY_REQUESTED_GTT:
    case R600_QUERY_MAPPED_VRAM:
    case R600_QUERY_MAPPED_GTT:
    case R600_QUERY_VRAM_USAGE:
    case R600_QUERY_VRAM_VIS_USAGE:
    case R600_QUERY_GTT_USAGE:
    case R600_QUERY_GPU_TEMPERATURE:
    case R600_QUERY_CURRENT_GPU_SCLK:
    case R600_QUERY_CURRENT_GPU_MCLK:
    case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
    case R600_QUERY_NUM_MAPPED_BUFFERS:
        query->begin_result = 0;
        break;
    case R600_QUERY_BUFFER_WAIT_TIME:
    case R600_QUERY_NUM_GFX_IBS:
    case R600_QUERY_NUM_SDMA_IBS:
    case R600_QUERY_NUM_BYTES_MOVED:
    case R600_QUERY_NUM_EVICTIONS:
    case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
        enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
        query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
        break;
    }
    case R600_QUERY_GFX_BO_LIST_SIZE:
        ws_id = winsys_id_from_type(query->b.type);
        query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
        query->begin_time = rctx->ws->query_value(rctx->ws,
                                                  RADEON_NUM_GFX_IBS);
        break;
    case R600_QUERY_CS_THREAD_BUSY:
        ws_id = winsys_id_from_type(query->b.type);
        query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
        query->begin_time = os_time_get_nano();
        break;
    case R600_QUERY_GALLIUM_THREAD_BUSY:
        query->begin_result =
            rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
        query->begin_time = os_time_get_nano();
        break;
    case R600_QUERY_GPU_LOAD:
    case R600_QUERY_GPU_SHADERS_BUSY:
    case R600_QUERY_GPU_TA_BUSY:
    case R600_QUERY_GPU_GDS_BUSY:
    case R600_QUERY_GPU_VGT_BUSY:
    case R600_QUERY_GPU_IA_BUSY:
    case R600_QUERY_GPU_SX_BUSY:
    case R600_QUERY_GPU_WD_BUSY:
    case R600_QUERY_GPU_BCI_BUSY:
    case R600_QUERY_GPU_SC_BUSY:
    case R600_QUERY_GPU_PA_BUSY:
    case R600_QUERY_GPU_DB_BUSY:
    case R600_QUERY_GPU_CP_BUSY:
    case R600_QUERY_GPU_CB_BUSY:
    case R600_QUERY_GPU_SDMA_BUSY:
    case R600_QUERY_GPU_PFP_BUSY:
    case R600_QUERY_GPU_MEQ_BUSY:
    case R600_QUERY_GPU_ME_BUSY:
    case R600_QUERY_GPU_SURF_SYNC_BUSY:
    case R600_QUERY_GPU_CP_DMA_BUSY:
    case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
    case R600_QUERY_GPU_CE_BUSY:
        query->begin_result = r600_begin_counter(rctx->screen,
                                                 query->b.type);
        break;
    case R600_QUERY_NUM_COMPILATIONS:
        query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
        break;
    case R600_QUERY_NUM_SHADERS_CREATED:
        query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
        break;
    case R600_QUERY_NUM_SHADER_CACHE_HITS:
        query->begin_result =
            p_atomic_read(&rctx->screen->num_shader_cache_hits);
        break;
    case R600_QUERY_GPIN_ASIC_ID:
    case R600_QUERY_GPIN_NUM_SIMD:
    case R600_QUERY_GPIN_NUM_RB:
    case R600_QUERY_GPIN_NUM_SPI:
    case R600_QUERY_GPIN_NUM_SE:
        break;
    default:
        unreachable("r600_query_sw_begin: bad query type");
    }

    return true;
}

static bool r600_query_sw_end(struct r600_common_context *rctx,
                              struct r600_query *rquery)
{
    struct r600_query_sw *query = (struct r600_query_sw *)rquery;
    enum radeon_value_id ws_id;

    switch (query->b.type) {
    case PIPE_QUERY_TIMESTAMP_DISJOINT:
        break;
    case PIPE_QUERY_GPU_FINISHED:
        rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
        break;
    case R600_QUERY_DRAW_CALLS:
        query->end_result = rctx->num_draw_calls;
        break;
    case R600_QUERY_DECOMPRESS_CALLS:
        query->end_result = rctx->num_decompress_calls;
        break;
    case R600_QUERY_MRT_DRAW_CALLS:
        query->end_result = rctx->num_mrt_draw_calls;
        break;
    case R600_QUERY_PRIM_RESTART_CALLS:
        query->end_result = rctx->num_prim_restart_calls;
        break;
    case R600_QUERY_SPILL_DRAW_CALLS:
        query->end_result = rctx->num_spill_draw_calls;
        break;
    case R600_QUERY_COMPUTE_CALLS:
        query->end_result = rctx->num_compute_calls;
        break;
    case R600_QUERY_SPILL_COMPUTE_CALLS:
        query->end_result = rctx->num_spill_compute_calls;
        break;
    case R600_QUERY_DMA_CALLS:
        query->end_result = rctx->num_dma_calls;
        break;
    case R600_QUERY_CP_DMA_CALLS:
        query->end_result = rctx->num_cp_dma_calls;
        break;
    case R600_QUERY_NUM_VS_FLUSHES:
        query->end_result = rctx->num_vs_flushes;
        break;
    case R600_QUERY_NUM_PS_FLUSHES:
        query->end_result = rctx->num_ps_flushes;
        break;
    case R600_QUERY_NUM_CS_FLUSHES:
        query->end_result = rctx->num_cs_flushes;
        break;
    case R600_QUERY_NUM_CB_CACHE_FLUSHES:
        query->end_result = rctx->num_cb_cache_flushes;
        break;
    case R600_QUERY_NUM_DB_CACHE_FLUSHES:
        query->end_result = rctx->num_db_cache_flushes;
        break;
    case R600_QUERY_NUM_L2_INVALIDATES:
        query->end_result = rctx->num_L2_invalidates;
        break;
    case R600_QUERY_NUM_L2_WRITEBACKS:
        query->end_result = rctx->num_L2_writebacks;
        break;
    case R600_QUERY_NUM_RESIDENT_HANDLES:
        query->end_result = rctx->num_resident_handles;
        break;
    case R600_QUERY_TC_OFFLOADED_SLOTS:
        query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
        break;
    case R600_QUERY_TC_DIRECT_SLOTS:
        query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
        break;
    case R600_QUERY_TC_NUM_SYNCS:
        query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
        break;
    case R600_QUERY_REQUESTED_VRAM:
    case R600_QUERY_REQUESTED_GTT:
    case R600_QUERY_MAPPED_VRAM:
    case R600_QUERY_MAPPED_GTT:
    case R600_QUERY_VRAM_USAGE:
    case R600_QUERY_VRAM_VIS_USAGE:
    case R600_QUERY_GTT_USAGE:
    case R600_QUERY_GPU_TEMPERATURE:
    case R600_QUERY_CURRENT_GPU_SCLK:
    case R600_QUERY_CURRENT_GPU_MCLK:
    case R600_QUERY_BUFFER_WAIT_TIME:
    case R600_QUERY_NUM_MAPPED_BUFFERS:
    case R600_QUERY_NUM_GFX_IBS:
    case R600_QUERY_NUM_SDMA_IBS:
    case R600_QUERY_NUM_BYTES_MOVED:
    case R600_QUERY_NUM_EVICTIONS:
    case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
        enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
        query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
        break;
    }
    case R600_QUERY_GFX_BO_LIST_SIZE:
        ws_id = winsys_id_from_type(query->b.type);
        query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
        query->end_time = rctx->ws->query_value(rctx->ws,
                                                RADEON_NUM_GFX_IBS);
        break;
    case R600_QUERY_CS_THREAD_BUSY:
        ws_id = winsys_id_from_type(query->b.type);
        query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
        query->end_time = os_time_get_nano();
        break;
    case R600_QUERY_GALLIUM_THREAD_BUSY:
        query->end_result =
            rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
        query->end_time = os_time_get_nano();
        break;
    case R600_QUERY_GPU_LOAD:
    case R600_QUERY_GPU_SHADERS_BUSY:
    case R600_QUERY_GPU_TA_BUSY:
    case R600_QUERY_GPU_GDS_BUSY:
    case R600_QUERY_GPU_VGT_BUSY:
    case R600_QUERY_GPU_IA_BUSY:
    case R600_QUERY_GPU_SX_BUSY:
    case R600_QUERY_GPU_WD_BUSY:
    case R600_QUERY_GPU_BCI_BUSY:
    case R600_QUERY_GPU_SC_BUSY:
    case R600_QUERY_GPU_PA_BUSY:
    case R600_QUERY_GPU_DB_BUSY:
    case R600_QUERY_GPU_CP_BUSY:
    case R600_QUERY_GPU_CB_BUSY:
    case R600_QUERY_GPU_SDMA_BUSY:
    case R600_QUERY_GPU_PFP_BUSY:
    case R600_QUERY_GPU_MEQ_BUSY:
    case R600_QUERY_GPU_ME_BUSY:
    case R600_QUERY_GPU_SURF_SYNC_BUSY:
    case R600_QUERY_GPU_CP_DMA_BUSY:
    case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
    case R600_QUERY_GPU_CE_BUSY:
        query->end_result = r600_end_counter(rctx->screen,
                                             query->b.type,
                                             query->begin_result);
        query->begin_result = 0;
        break;
    case R600_QUERY_NUM_COMPILATIONS:
        query->end_result = p_atomic_read(&rctx->screen->num_compilations);
        break;
    case R600_QUERY_NUM_SHADERS_CREATED:
        query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
        break;
    case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
        query->end_result = rctx->last_tex_ps_draw_ratio;
        break;
    case R600_QUERY_NUM_SHADER_CACHE_HITS:
        query->end_result =
            p_atomic_read(&rctx->screen->num_shader_cache_hits);
        break;
    case R600_QUERY_GPIN_ASIC_ID:
    case R600_QUERY_GPIN_NUM_SIMD:
    case R600_QUERY_GPIN_NUM_RB:
    case R600_QUERY_GPIN_NUM_SPI:
    case R600_QUERY_GPIN_NUM_SE:
        break;
    default:
        unreachable("r600_query_sw_end: bad query type");
    }

    return true;
}

static bool r600_query_sw_get_result(struct r600_common_context *rctx,
                                     struct r600_query *rquery,
                                     bool wait,
                                     union pipe_query_result *result)
{
    struct r600_query_sw *query = (struct r600_query_sw *)rquery;

    switch (query->b.type) {
    case PIPE_QUERY_TIMESTAMP_DISJOINT:
        /* Convert from cycles per millisecond to cycles per second (Hz). */
        result->timestamp_disjoint.frequency =
            (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
        result->timestamp_disjoint.disjoint = false;
        return true;
    case PIPE_QUERY_GPU_FINISHED: {
        struct pipe_screen *screen = rctx->b.screen;
        struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;

        result->b = screen->fence_finish(screen, ctx, query->fence,
                                         wait ? PIPE_TIMEOUT_INFINITE : 0);
        return result->b;
    }

    case R600_QUERY_GFX_BO_LIST_SIZE:
        result->u64 = (query->end_result - query->begin_result) /
                      (query->end_time - query->begin_time);
        return true;
    case R600_QUERY_CS_THREAD_BUSY:
    case R600_QUERY_GALLIUM_THREAD_BUSY:
        result->u64 = (query->end_result - query->begin_result) * 100 /
                      (query->end_time - query->begin_time);
        return true;
    case R600_QUERY_GPIN_ASIC_ID:
        result->u32 = 0;
        return true;
    case R600_QUERY_GPIN_NUM_SIMD:
        result->u32 = rctx->screen->info.num_good_compute_units;
        return true;
    case R600_QUERY_GPIN_NUM_RB:
        result->u32 = rctx->screen->info.num_render_backends;
        return true;
    case R600_QUERY_GPIN_NUM_SPI:
        result->u32 = 1; /* all supported chips have one SPI per SE */
        return true;
    case R600_QUERY_GPIN_NUM_SE:
        result->u32 = rctx->screen->info.max_se;
        return true;
    }

    result->u64 = query->end_result - query->begin_result;

    switch (query->b.type) {
    case R600_QUERY_BUFFER_WAIT_TIME:
    case R600_QUERY_GPU_TEMPERATURE:
        result->u64 /= 1000;
        break;
    case R600_QUERY_CURRENT_GPU_SCLK:
    case R600_QUERY_CURRENT_GPU_MCLK:
        result->u64 *= 1000000;
        break;
    }

    return true;
}

static struct r600_query_ops sw_query_ops = {
    .destroy = r600_query_sw_destroy,
    .begin = r600_query_sw_begin,
    .end = r600_query_sw_end,
    .get_result = r600_query_sw_get_result,
    .get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
    struct r600_query_sw *query;

    query = CALLOC_STRUCT(r600_query_sw);
    if (!query)
        return NULL;

    query->b.type = query_type;
    query->b.ops = &sw_query_ops;

    return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_screen *rscreen,
                           struct r600_query *rquery)
{
    struct r600_query_hw *query = (struct r600_query_hw *)rquery;
    struct r600_query_buffer *prev = query->buffer.previous;

    /* Release all query buffers. */
    while (prev) {
        struct r600_query_buffer *qbuf = prev;
        prev = prev->previous;
        r600_resource_reference(&qbuf->buf, NULL);
        FREE(qbuf);
    }

    r600_resource_reference(&query->buffer.buf, NULL);
    r600_resource_reference(&query->workaround_buf, NULL);
    FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
                                                   struct r600_query_hw *query)
{
    unsigned buf_size = MAX2(query->result_size,
                             rscreen->info.min_alloc_size);

    /* Queries are normally read by the CPU after
     * being written by the GPU, hence staging is probably a good
     * usage pattern.
     */
    struct r600_resource *buf = (struct r600_resource*)
        pipe_buffer_create(&rscreen->b, 0,
                           PIPE_USAGE_STAGING, buf_size);
    if (!buf)
        return NULL;

    if (!query->ops->prepare_buffer(rscreen, query, buf)) {
        r600_resource_reference(&buf, NULL);
        return NULL;
    }

    return buf;
}

static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
                                         struct r600_query_hw *query,
                                         struct r600_resource *buffer)
{
    /* Callers ensure that the buffer is currently unused by the GPU. */
    uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
                                                PIPE_TRANSFER_WRITE |
                                                PIPE_TRANSFER_UNSYNCHRONIZED);
    if (!results)
        return false;

    memset(results, 0, buffer->b.b.width0);

    if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
        query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
        unsigned max_rbs = rscreen->info.num_render_backends;
        unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
        unsigned num_results;
        unsigned i, j;

        /* Set top bits for unused backends. */
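        /* A disabled RB never writes its ZPASS result, so pre-set the
         * "valid" bit (the top bit of each result's high dword) for it;
         * otherwise the status-bit check in r600_query_read_result()
         * would treat the result as forever unavailable.
         */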
        num_results = buffer->b.b.width0 / query->result_size;
        for (j = 0; j < num_results; j++) {
            for (i = 0; i < max_rbs; i++) {
                if (!(enabled_rb_mask & (1<<i))) {
                    results[(i * 4)+1] = 0x80000000;
                    results[(i * 4)+3] = 0x80000000;
                }
            }
            results += 4 * max_rbs;
        }
    }

    return true;
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
                                              struct r600_query *rquery,
                                              bool wait,
                                              enum pipe_query_value_type result_type,
                                              int index,
                                              struct pipe_resource *resource,
                                              unsigned offset);

static struct r600_query_ops query_hw_ops = {
    .destroy = r600_query_hw_destroy,
    .begin = r600_query_hw_begin,
    .end = r600_query_hw_end,
    .get_result = r600_query_hw_get_result,
    .get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
                                        struct r600_query_hw *query,
                                        struct r600_resource *buffer,
                                        uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
                                       struct r600_query_hw *query,
                                       struct r600_resource *buffer,
                                       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
                                     struct r600_query_hw *, void *buffer,
                                     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
                                       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
    .prepare_buffer = r600_query_hw_prepare_buffer,
    .emit_start = r600_query_hw_do_emit_start,
    .emit_stop = r600_query_hw_do_emit_stop,
    .clear_result = r600_query_hw_clear_result,
    .add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_screen *rscreen,
                        struct r600_query_hw *query)
{
    query->buffer.buf = r600_new_query_buffer(rscreen, query);
    if (!query->buffer.buf)
        return false;

    return true;
}

static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
                                               unsigned query_type,
                                               unsigned index)
{
    struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
    if (!query)
        return NULL;

    query->b.type = query_type;
    query->b.ops = &query_hw_ops;
    query->ops = &query_hw_default_hw_ops;

    switch (query_type) {
    case PIPE_QUERY_OCCLUSION_COUNTER:
    case PIPE_QUERY_OCCLUSION_PREDICATE:
        query->result_size = 16 * rscreen->info.num_render_backends;
        query->result_size += 16; /* for the fence + alignment */
        query->num_cs_dw_begin = 6;
        query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
        break;
    case PIPE_QUERY_TIME_ELAPSED:
        query->result_size = 24;
        query->num_cs_dw_begin = 8;
        query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
        break;
    case PIPE_QUERY_TIMESTAMP:
        query->result_size = 16;
        query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
        query->flags = R600_QUERY_HW_FLAG_NO_START;
        break;
    case PIPE_QUERY_PRIMITIVES_EMITTED:
    case PIPE_QUERY_PRIMITIVES_GENERATED:
    case PIPE_QUERY_SO_STATISTICS:
    case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
        /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
        query->result_size = 32;
        query->num_cs_dw_begin = 6;
        query->num_cs_dw_end = 6;
        query->stream = index;
        break;
    case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
        /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
        query->result_size = 32 * R600_MAX_STREAMS;
        query->num_cs_dw_begin = 6 * R600_MAX_STREAMS;
        query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
        break;
    case PIPE_QUERY_PIPELINE_STATISTICS:
        /* 11 values on EG, 8 on R600. */
        query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
        query->result_size += 8; /* for the fence + alignment */
        query->num_cs_dw_begin = 6;
        query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
        break;
    default:
        assert(0);
        FREE(query);
        return NULL;
    }

    if (!r600_query_hw_init(rscreen, query)) {
        FREE(query);
        return NULL;
    }

    return (struct pipe_query *)query;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
                                              unsigned type, int diff)
{
    if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
        type == PIPE_QUERY_OCCLUSION_PREDICATE) {
        bool old_enable = rctx->num_occlusion_queries != 0;
        bool old_perfect_enable =
            rctx->num_perfect_occlusion_queries != 0;
        bool enable, perfect_enable;

        rctx->num_occlusion_queries += diff;
        assert(rctx->num_occlusion_queries >= 0);

        if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
            rctx->num_perfect_occlusion_queries += diff;
            assert(rctx->num_perfect_occlusion_queries >= 0);
        }

        enable = rctx->num_occlusion_queries != 0;
        perfect_enable = rctx->num_perfect_occlusion_queries != 0;

        if (enable != old_enable || perfect_enable != old_perfect_enable) {
            rctx->set_occlusion_query_state(&rctx->b, enable);
        }
    }
}

static unsigned event_type_for_stream(unsigned stream)
{
    switch (stream) {
    default:
    case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
    case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
    case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
    case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
    }
}

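/* Ask the CP to sample the streamout statistics for one stream: the
 * EVENT_WRITE with EVENT_INDEX(3) makes the hardware write the stream's
 * NumPrimitivesWritten and PrimitiveStorageNeeded counters (two 64-bit
 * values) to "va".
 */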
static void emit_sample_streamout(struct radeon_winsys_cs *cs, uint64_t va,
                                  unsigned stream)
{
    radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
    radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
    radeon_emit(cs, va);
    radeon_emit(cs, va >> 32);
}

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
                                        struct r600_query_hw *query,
                                        struct r600_resource *buffer,
                                        uint64_t va)
{
    struct radeon_winsys_cs *cs = ctx->gfx.cs;

    switch (query->b.type) {
    case PIPE_QUERY_OCCLUSION_COUNTER:
    case PIPE_QUERY_OCCLUSION_PREDICATE:
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);
        break;
    case PIPE_QUERY_PRIMITIVES_EMITTED:
    case PIPE_QUERY_PRIMITIVES_GENERATED:
    case PIPE_QUERY_SO_STATISTICS:
    case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
        emit_sample_streamout(cs, va, query->stream);
        break;
    case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
        for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
            emit_sample_streamout(cs, va + 32 * stream, stream);
        break;
    case PIPE_QUERY_TIME_ELAPSED:
        if (ctx->chip_class >= SI) {
            /* Write the timestamp from the CP not waiting for
             * outstanding draws (top-of-pipe).
             */
            radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
            radeon_emit(cs, COPY_DATA_COUNT_SEL |
                            COPY_DATA_SRC_SEL(COPY_DATA_TIMESTAMP) |
                            COPY_DATA_DST_SEL(COPY_DATA_MEM_ASYNC));
            radeon_emit(cs, 0);
            radeon_emit(cs, 0);
            radeon_emit(cs, va);
            radeon_emit(cs, va >> 32);
        } else {
            /* Write the timestamp after the last draw is done.
             * (bottom-of-pipe)
             */
            r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
                                     0, 3, NULL, va, 0, query->b.type);
        }
        break;
    case PIPE_QUERY_PIPELINE_STATISTICS:
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);
        break;
    default:
        assert(0);
    }
    r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
                    RADEON_PRIO_QUERY);
}

static void r600_query_hw_emit_start(struct r600_common_context *ctx,
                                     struct r600_query_hw *query)
{
    uint64_t va;

    if (!query->buffer.buf)
        return; // previous buffer allocation failure

    r600_update_occlusion_query_state(ctx, query->b.type, 1);
    r600_update_prims_generated_query_state(ctx, query->b.type, 1);

    ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
                           true);

    /* Get a new query buffer if needed. */
    if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
        struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
        *qbuf = query->buffer;
        query->buffer.results_end = 0;
        query->buffer.previous = qbuf;
        query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
        if (!query->buffer.buf)
            return;
    }

    /* emit begin query */
    va = query->buffer.buf->gpu_address + query->buffer.results_end;

    query->ops->emit_start(ctx, query, query->buffer.buf, va);

    ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
                                       struct r600_query_hw *query,
                                       struct r600_resource *buffer,
                                       uint64_t va)
{
    struct radeon_winsys_cs *cs = ctx->gfx.cs;
    uint64_t fence_va = 0;

    switch (query->b.type) {
    case PIPE_QUERY_OCCLUSION_COUNTER:
    case PIPE_QUERY_OCCLUSION_PREDICATE:
        va += 8;
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);

        fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
        break;
    case PIPE_QUERY_PRIMITIVES_EMITTED:
    case PIPE_QUERY_PRIMITIVES_GENERATED:
    case PIPE_QUERY_SO_STATISTICS:
    case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
        va += 16;
        emit_sample_streamout(cs, va, query->stream);
        break;
    case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
        va += 16;
        for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
            emit_sample_streamout(cs, va + 32 * stream, stream);
        break;
    case PIPE_QUERY_TIME_ELAPSED:
        va += 8;
        /* fall through */
    case PIPE_QUERY_TIMESTAMP:
        r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
                                 0, 3, NULL, va, 0, query->b.type);
        fence_va = va + 8;
        break;
    case PIPE_QUERY_PIPELINE_STATISTICS: {
        unsigned sample_size = (query->result_size - 8) / 2;

        va += sample_size;
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);

        fence_va = va + sample_size;
        break;
    }
    default:
        assert(0);
    }
    r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
                    RADEON_PRIO_QUERY);

    if (fence_va)
        r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
                                 query->buffer.buf, fence_va, 0x80000000,
                                 query->b.type);
}

static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
                                    struct r600_query_hw *query)
{
    uint64_t va;

    if (!query->buffer.buf)
        return; // previous buffer allocation failure

    /* Queries that have a begin already reserved CS space for the end
     * packets in begin_query; only queries without a begin reserve it here. */
    if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
        ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
    }

    /* emit end query */
    va = query->buffer.buf->gpu_address + query->buffer.results_end;

    query->ops->emit_stop(ctx, query, query->buffer.buf, va);

    query->buffer.results_end += query->result_size;

    if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
        ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

    r600_update_occlusion_query_state(ctx, query->b.type, -1);
    r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

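/* Emit a SET_PREDICATION packet. On GFX9 the predication operation gets its
 * own dword alongside a full 64-bit address; older chips pack the high 8
 * address bits into the operation dword.
 */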
static void emit_set_predicate(struct r600_common_context *ctx,
                               struct r600_resource *buf, uint64_t va,
                               uint32_t op)
{
    struct radeon_winsys_cs *cs = ctx->gfx.cs;

    if (ctx->chip_class >= GFX9) {
        radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
        radeon_emit(cs, op);
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);
    } else {
        radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
        radeon_emit(cs, va);
        radeon_emit(cs, op | ((va >> 32) & 0xFF));
    }
    r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
                    RADEON_PRIO_QUERY);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
                                        struct r600_atom *atom)
{
    struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
    struct r600_query_buffer *qbuf;
    uint32_t op;
    bool flag_wait, invert;

    if (!query)
        return;

    invert = ctx->render_cond_invert;
    flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
                ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

    if (query->workaround_buf) {
        op = PRED_OP(PREDICATION_OP_BOOL64);
    } else {
        switch (query->b.type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
            op = PRED_OP(PREDICATION_OP_ZPASS);
            break;
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
        case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
            op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
            invert = !invert;
            break;
        default:
            assert(0);
            return;
        }
    }

    /* if true then invert, see GL_ARB_conditional_render_inverted */
    if (invert)
        op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
    else
        op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

    /* Use the value written by compute shader as a workaround. Note that
     * the wait flag does not apply in this predication mode.
     *
     * The shader outputs the result value to L2. Workarounds only affect VI
     * and later, where the CP reads data from L2, so we don't need an
     * additional flush.
     */
    if (query->workaround_buf) {
        uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
        emit_set_predicate(ctx, query->workaround_buf, va, op);
        return;
    }

    op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

    /* emit predicate packets for all data blocks */
    for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
        unsigned results_base = 0;
        uint64_t va_base = qbuf->buf->gpu_address;

        while (results_base < qbuf->results_end) {
            uint64_t va = va_base + results_base;

            if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
                for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
                    emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

                    /* set CONTINUE bit for all packets except the first */
                    op |= PREDICATION_CONTINUE;
                }
            } else {
                emit_set_predicate(ctx, qbuf->buf, va, op);
                op |= PREDICATION_CONTINUE;
            }

            results_base += query->result_size;
        }
    }
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
    struct r600_common_screen *rscreen =
        (struct r600_common_screen *)ctx->screen;

    if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
        query_type == PIPE_QUERY_GPU_FINISHED ||
        query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
        return r600_query_sw_create(query_type);

    return r600_query_hw_create(rscreen, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
    struct r600_common_context *rctx = (struct r600_common_context *)ctx;
    struct r600_query *rquery = (struct r600_query *)query;

    rquery->ops->destroy(rctx->screen, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
                                struct pipe_query *query)
{
    struct r600_common_context *rctx = (struct r600_common_context *)ctx;
    struct r600_query *rquery = (struct r600_query *)query;

    return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
                                 struct r600_query_hw *query)
{
    struct r600_query_buffer *prev = query->buffer.previous;

    /* Discard the old query buffers. */
    while (prev) {
        struct r600_query_buffer *qbuf = prev;
        prev = prev->previous;
        r600_resource_reference(&qbuf->buf, NULL);
        FREE(qbuf);
    }

    query->buffer.results_end = 0;
    query->buffer.previous = NULL;

    /* Obtain a new buffer if the current one can't be mapped without a stall. */
    if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
        !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
        r600_resource_reference(&query->buffer.buf, NULL);
        query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
    } else {
        if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
            r600_resource_reference(&query->buffer.buf, NULL);
    }
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
                         struct r600_query *rquery)
{
    struct r600_query_hw *query = (struct r600_query_hw *)rquery;

    if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
        assert(0);
        return false;
    }

    if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
        r600_query_hw_reset_buffers(rctx, query);

    r600_resource_reference(&query->workaround_buf, NULL);

    r600_query_hw_emit_start(rctx, query);
    if (!query->buffer.buf)
        return false;

    LIST_ADDTAIL(&query->list, &rctx->active_queries);
    return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
    struct r600_common_context *rctx = (struct r600_common_context *)ctx;
    struct r600_query *rquery = (struct r600_query *)query;

    return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
                       struct r600_query *rquery)
{
    struct r600_query_hw *query = (struct r600_query_hw *)rquery;

    if (query->flags & R600_QUERY_HW_FLAG_NO_START)
        r600_query_hw_reset_buffers(rctx, query);

    r600_query_hw_emit_stop(rctx, query);

    if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
        LIST_DELINIT(&query->list);

    if (!query->buffer.buf)
        return false;

    return true;
}

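/* Fill *params with the buffer layout of one result pair for the given query
 * type (and result index), for consumption by the result-collection compute
 * shader.
 */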
static void r600_get_hw_query_params(struct r600_common_context *rctx,
                                     struct r600_query_hw *rquery, int index,
                                     struct r600_hw_query_params *params)
{
    unsigned max_rbs = rctx->screen->info.num_render_backends;

    params->pair_stride = 0;
    params->pair_count = 1;

    switch (rquery->b.type) {
    case PIPE_QUERY_OCCLUSION_COUNTER:
    case PIPE_QUERY_OCCLUSION_PREDICATE:
        params->start_offset = 0;
        params->end_offset = 8;
        params->fence_offset = max_rbs * 16;
        params->pair_stride = 16;
        params->pair_count = max_rbs;
        break;
    case PIPE_QUERY_TIME_ELAPSED:
        params->start_offset = 0;
        params->end_offset = 8;
        params->fence_offset = 16;
        break;
    case PIPE_QUERY_TIMESTAMP:
        params->start_offset = 0;
        params->end_offset = 0;
        params->fence_offset = 8;
        break;
    case PIPE_QUERY_PRIMITIVES_EMITTED:
        params->start_offset = 8;
        params->end_offset = 24;
        params->fence_offset = params->end_offset + 4;
        break;
    case PIPE_QUERY_PRIMITIVES_GENERATED:
        params->start_offset = 0;
        params->end_offset = 16;
        params->fence_offset = params->end_offset + 4;
        break;
    case PIPE_QUERY_SO_STATISTICS:
        params->start_offset = 8 - index * 8;
        params->end_offset = 24 - index * 8;
        params->fence_offset = params->end_offset + 4;
        break;
    case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
        params->pair_count = R600_MAX_STREAMS;
        params->pair_stride = 32;
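        /* fall through */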
    case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
        params->start_offset = 0;
        params->end_offset = 16;

        /* We can re-use the high dword of the last 64-bit value as a
         * fence: it is initialized as 0, and the high bit is set by
         * the write of the streamout stats event.
         */
        params->fence_offset = rquery->result_size - 4;
        break;
    case PIPE_QUERY_PIPELINE_STATISTICS:
    {
        /* Offsets apply to EG+ */
        static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
        params->start_offset = offsets[index];
        params->end_offset = 88 + offsets[index];
        params->fence_offset = 2 * 88;
        break;
    }
    default:
        unreachable("r600_get_hw_query_params unsupported");
    }
}

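/* Read one begin/end counter pair (each value stored as two dwords) and
 * return end - start. If test_status_bit is set, the top bit of each 64-bit
 * value is a "result valid" flag written by the hardware, and 0 is returned
 * until both values are valid.
 */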
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
                                       bool test_status_bit)
{
    uint32_t *current_result = (uint32_t*)map;
    uint64_t start, end;

    start = (uint64_t)current_result[start_index] |
            (uint64_t)current_result[start_index+1] << 32;
    end = (uint64_t)current_result[end_index] |
          (uint64_t)current_result[end_index+1] << 32;

    if (!test_status_bit ||
        ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
        return end - start;
    }
    return 0;
}

static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
                                     struct r600_query_hw *query,
                                     void *buffer,
                                     union pipe_query_result *result)
{
    unsigned max_rbs = rscreen->info.num_render_backends;

    switch (query->b.type) {
    case PIPE_QUERY_OCCLUSION_COUNTER: {
        for (unsigned i = 0; i < max_rbs; ++i) {
            unsigned results_base = i * 16;
            result->u64 +=
                r600_query_read_result(buffer + results_base, 0, 2, true);
        }
        break;
    }
    case PIPE_QUERY_OCCLUSION_PREDICATE: {
        for (unsigned i = 0; i < max_rbs; ++i) {
            unsigned results_base = i * 16;
            result->b = result->b ||
                r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
        }
        break;
    }
    case PIPE_QUERY_TIME_ELAPSED:
        result->u64 += r600_query_read_result(buffer, 0, 2, false);
        break;
    case PIPE_QUERY_TIMESTAMP:
        result->u64 = *(uint64_t*)buffer;
        break;
    case PIPE_QUERY_PRIMITIVES_EMITTED:
        /* SAMPLE_STREAMOUTSTATS stores this structure:
         * {
         *    u64 NumPrimitivesWritten;
         *    u64 PrimitiveStorageNeeded;
         * }
         * We only need NumPrimitivesWritten here. */
        result->u64 += r600_query_read_result(buffer, 2, 6, true);
        break;
    case PIPE_QUERY_PRIMITIVES_GENERATED:
        /* Here we read PrimitiveStorageNeeded. */
        result->u64 += r600_query_read_result(buffer, 0, 4, true);
        break;
    case PIPE_QUERY_SO_STATISTICS:
        result->so_statistics.num_primitives_written +=
            r600_query_read_result(buffer, 2, 6, true);
        result->so_statistics.primitives_storage_needed +=
            r600_query_read_result(buffer, 0, 4, true);
        break;
    case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
        result->b = result->b ||
            r600_query_read_result(buffer, 2, 6, true) !=
            r600_query_read_result(buffer, 0, 4, true);
        break;
    case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
        for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
            result->b = result->b ||
                r600_query_read_result(buffer, 2, 6, true) !=
                r600_query_read_result(buffer, 0, 4, true);
            buffer = (char *)buffer + 32;
        }
        break;
    case PIPE_QUERY_PIPELINE_STATISTICS:
        if (rscreen->chip_class >= EVERGREEN) {
            result->pipeline_statistics.ps_invocations +=
                r600_query_read_result(buffer, 0, 22, false);
            result->pipeline_statistics.c_primitives +=
                r600_query_read_result(buffer, 2, 24, false);
            result->pipeline_statistics.c_invocations +=
                r600_query_read_result(buffer, 4, 26, false);
            result->pipeline_statistics.vs_invocations +=
                r600_query_read_result(buffer, 6, 28, false);
            result->pipeline_statistics.gs_invocations +=
                r600_query_read_result(buffer, 8, 30, false);
            result->pipeline_statistics.gs_primitives +=
                r600_query_read_result(buffer, 10, 32, false);
            result->pipeline_statistics.ia_primitives +=
                r600_query_read_result(buffer, 12, 34, false);
            result->pipeline_statistics.ia_vertices +=
                r600_query_read_result(buffer, 14, 36, false);
            result->pipeline_statistics.hs_invocations +=
                r600_query_read_result(buffer, 16, 38, false);
            result->pipeline_statistics.ds_invocations +=
                r600_query_read_result(buffer, 18, 40, false);
            result->pipeline_statistics.cs_invocations +=
                r600_query_read_result(buffer, 20, 42, false);
        } else {
            result->pipeline_statistics.ps_invocations +=
                r600_query_read_result(buffer, 0, 16, false);
            result->pipeline_statistics.c_primitives +=
                r600_query_read_result(buffer, 2, 18, false);
            result->pipeline_statistics.c_invocations +=
                r600_query_read_result(buffer, 4, 20, false);
            result->pipeline_statistics.vs_invocations +=
                r600_query_read_result(buffer, 6, 22, false);
            result->pipeline_statistics.gs_invocations +=
                r600_query_read_result(buffer, 8, 24, false);
            result->pipeline_statistics.gs_primitives +=
                r600_query_read_result(buffer, 10, 26, false);
            result->pipeline_statistics.ia_primitives +=
                r600_query_read_result(buffer, 12, 28, false);
            result->pipeline_statistics.ia_vertices +=
                r600_query_read_result(buffer, 14, 30, false);
        }
#if 0 /* for testing */
        printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
               "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
               "Clipper prims=%llu, PS=%llu, CS=%llu\n",
               result->pipeline_statistics.ia_vertices,
               result->pipeline_statistics.ia_primitives,
               result->pipeline_statistics.vs_invocations,
               result->pipeline_statistics.hs_invocations,
               result->pipeline_statistics.ds_invocations,
               result->pipeline_statistics.gs_invocations,
               result->pipeline_statistics.gs_primitives,
               result->pipeline_statistics.c_invocations,
               result->pipeline_statistics.c_primitives,
               result->pipeline_statistics.ps_invocations,
               result->pipeline_statistics.cs_invocations);
#endif
        break;
    default:
        assert(0);
    }
}

static boolean r600_get_query_result(struct pipe_context *ctx,
                                     struct pipe_query *query, boolean wait,
                                     union pipe_query_result *result)
{
    struct r600_common_context *rctx = (struct r600_common_context *)ctx;
    struct r600_query *rquery = (struct r600_query *)query;

    return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_get_query_result_resource(struct pipe_context *ctx,
                                           struct pipe_query *query,
                                           boolean wait,
                                           enum pipe_query_value_type result_type,
                                           int index,
                                           struct pipe_resource *resource,
                                           unsigned offset)
{
    struct r600_common_context *rctx = (struct r600_common_context *)ctx;
    struct r600_query *rquery = (struct r600_query *)query;

    rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
                                     resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
                                       union pipe_query_result *result)
{
    util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
                              struct r600_query *rquery,
                              bool wait, union pipe_query_result *result)
{
    struct r600_common_screen *rscreen = rctx->screen;
    struct r600_query_hw *query = (struct r600_query_hw *)rquery;
    struct r600_query_buffer *qbuf;

    query->ops->clear_result(query, result);

    for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
        unsigned usage = PIPE_TRANSFER_READ |
                         (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
        unsigned results_base = 0;
        void *map;

        if (rquery->b.flushed)
            map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
        else
            map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);

        if (!map)
            return false;

        while (results_base != qbuf->results_end) {
            query->ops->add_result(rscreen, query, map + results_base,
                                   result);
            results_base += query->result_size;
        }
    }

    /* Convert the time to nanoseconds (clock_crystal_freq is in kHz). */
    if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
        rquery->type == PIPE_QUERY_TIMESTAMP) {
        result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
    }
    return true;
}

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
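/* For illustration only (not part of the shader contract): reading back an
 * occlusion counter as a 64-bit value from a single, unchained result buffer
 * on a chip with 4 render backends would roughly set up
 *
 *   CONST[0] = { end_offset    = 8,            (end - start within a pair)
 *                result_stride = 4*16 + 16,    (per-RB pairs + fence/alignment)
 *                result_count  = results_end / result_stride,
 *                config        = 64 }          (store full 64-bit result)
 *   CONST[1] = { fence_offset  = 4*16,
 *                pair_stride   = 16,
 *                pair_count    = 4 }
 *
 * as derived from r600_get_hw_query_params() and
 * r600_query_hw_get_result_resource() below.
 */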
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
    /* TEMP[0].xy = accumulated result so far
     * TEMP[0].z = result not available
     *
     * TEMP[1].x = current result index
     * TEMP[1].y = current pair index
     */
    static const char text_tmpl[] =
        "COMP\n"
        "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
        "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
        "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
        "DCL BUFFER[0]\n"
        "DCL BUFFER[1]\n"
        "DCL BUFFER[2]\n"
        "DCL CONST[0..1]\n"
        "DCL TEMP[0..5]\n"
        "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
        "IMM[1] UINT32 {1, 2, 4, 8}\n"
        "IMM[2] UINT32 {16, 32, 64, 128}\n"
        "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
        "IMM[4] UINT32 {256, 0, 0, 0}\n"

        "AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
        "UIF TEMP[5]\n"
            /* Check result availability. */
            "LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
            "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
            "MOV TEMP[1], TEMP[0].zzzz\n"
            "NOT TEMP[0].z, TEMP[0].zzzz\n"

            /* Load result if available. */
            "UIF TEMP[1]\n"
                "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
            "ENDIF\n"
        "ELSE\n"
            /* Load previously accumulated result if requested. */
            "MOV TEMP[0], IMM[0].xxxx\n"
            "AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
            "UIF TEMP[4]\n"
                "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
            "ENDIF\n"

            "MOV TEMP[1].x, IMM[0].xxxx\n"
            "BGNLOOP\n"
                /* Break if accumulated result so far is not available. */
                "UIF TEMP[0].zzzz\n"
                    "BRK\n"
                "ENDIF\n"

                /* Break if result_index >= result_count. */
                "USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
                "UIF TEMP[5]\n"
                    "BRK\n"
                "ENDIF\n"

                /* Load fence and check result availability */
                "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
                "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
                "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
                "NOT TEMP[0].z, TEMP[0].zzzz\n"
                "UIF TEMP[0].zzzz\n"
                    "BRK\n"
                "ENDIF\n"

                "MOV TEMP[1].y, IMM[0].xxxx\n"
                "BGNLOOP\n"
                    /* Load start and end. */
                    "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
                    "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
                    "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

                    "UADD TEMP[5].y, TEMP[5].xxxx, CONST[0].xxxx\n"
                    "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

                    "U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"

                    "AND TEMP[5].z, CONST[0].wwww, IMM[4].xxxx\n"
                    "UIF TEMP[5].zzzz\n"
                        /* Load second start/end half-pair and
                         * take the difference
                         */
                        "UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
                        "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
                        "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

                        "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
                        "U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
                    "ENDIF\n"

                    "U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"

                    /* Increment pair index */
                    "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
                    "USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
                    "UIF TEMP[5]\n"
                        "BRK\n"
                    "ENDIF\n"
                "ENDLOOP\n"

                /* Increment result index */
                "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
            "ENDLOOP\n"
        "ENDIF\n"

        "AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
        "UIF TEMP[4]\n"
            /* Store accumulated data for chaining. */
            "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
        "ELSE\n"
            "AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
            "UIF TEMP[4]\n"
                /* Store result availability. */
                "NOT TEMP[0].z, TEMP[0]\n"
                "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
                "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

                "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
                "UIF TEMP[4]\n"
                    "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
                "ENDIF\n"
            "ELSE\n"
                /* Store result if it is available. */
                "NOT TEMP[4], TEMP[0].zzzz\n"
                "UIF TEMP[4]\n"
                    /* Apply timestamp conversion */
                    "AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
                    "UIF TEMP[4]\n"
                        "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
                        "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
                    "ENDIF\n"

                    /* Convert to boolean */
                    "AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
                    "UIF TEMP[4]\n"
                        "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
                        "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
                        "MOV TEMP[0].y, IMM[0].xxxx\n"
                    "ENDIF\n"

                    "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
                    "UIF TEMP[4]\n"
                        "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
                    "ELSE\n"
                        /* Clamping */
                        "UIF TEMP[0].yyyy\n"
                            "MOV TEMP[0].x, IMM[0].wwww\n"
                        "ENDIF\n"

                        "AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
                        "UIF TEMP[4]\n"
                            "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
                        "ENDIF\n"

                        "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
                    "ENDIF\n"
                "ENDIF\n"
            "ENDIF\n"
        "ENDIF\n"

        "END\n";

    char text[sizeof(text_tmpl) + 32];
    struct tgsi_token tokens[1024];
    struct pipe_compute_state state = {};

    /* Hard code the frequency into the shader so that the backend can
     * use the full range of optimizations for divide-by-constant.
     */
    snprintf(text, sizeof(text), text_tmpl,
             rctx->screen->info.clock_crystal_freq);

    if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
        assert(false);
        return;
    }

    state.ir_type = PIPE_SHADER_IR_TGSI;
    state.prog = tokens;

    rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}

static void r600_restore_qbo_state(struct r600_common_context *rctx,
                                   struct r600_qbo_state *st)
{
    rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

    rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
    pipe_resource_reference(&st->saved_const0.buffer, NULL);

    rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
    for (unsigned i = 0; i < 3; ++i)
        pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}

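/* GPU-side result collection: launch one single-thread compute grid per
 * buffer in the query's buffer chain. Partial sums are carried between grids
 * through a small suballocated scratch buffer; the grid for the buffer
 * without a predecessor writes the final value to the caller's resource at
 * "offset".
 */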
1642 static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
1643 struct r600_query *rquery,
1644 bool wait,
1645 enum pipe_query_value_type result_type,
1646 int index,
1647 struct pipe_resource *resource,
1648 unsigned offset)
1649 {
1650 struct r600_query_hw *query = (struct r600_query_hw *)rquery;
1651 struct r600_query_buffer *qbuf;
1652 struct r600_query_buffer *qbuf_prev;
1653 struct pipe_resource *tmp_buffer = NULL;
1654 unsigned tmp_buffer_offset = 0;
1655 struct r600_qbo_state saved_state = {};
1656 struct pipe_grid_info grid = {};
1657 struct pipe_constant_buffer constant_buffer = {};
1658 struct pipe_shader_buffer ssbo[3];
1659 struct r600_hw_query_params params;
1660 struct {
1661 uint32_t end_offset;
1662 uint32_t result_stride;
1663 uint32_t result_count;
1664 uint32_t config;
1665 uint32_t fence_offset;
1666 uint32_t pair_stride;
1667 uint32_t pair_count;
1668 } consts;
1669
1670 if (!rctx->query_result_shader) {
1671 r600_create_query_result_shader(rctx);
1672 if (!rctx->query_result_shader)
1673 return;
1674 }
1675
1676 if (query->buffer.previous) {
1677 u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
1678 &tmp_buffer_offset, &tmp_buffer);
1679 if (!tmp_buffer)
1680 return;
1681 }
1682
1683 rctx->save_qbo_state(&rctx->b, &saved_state);
1684
1685 r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
1686 consts.end_offset = params.end_offset - params.start_offset;
1687 consts.fence_offset = params.fence_offset - params.start_offset;
1688 consts.result_stride = query->result_size;
1689 consts.pair_stride = params.pair_stride;
1690 consts.pair_count = params.pair_count;
1691
1692 constant_buffer.buffer_size = sizeof(consts);
1693 constant_buffer.user_buffer = &consts;
1694
1695 ssbo[1].buffer = tmp_buffer;
1696 ssbo[1].buffer_offset = tmp_buffer_offset;
1697 ssbo[1].buffer_size = 16;
1698
1699 ssbo[2] = ssbo[1];
1700
1701 rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);
1702
1703 grid.block[0] = 1;
1704 grid.block[1] = 1;
1705 grid.block[2] = 1;
1706 grid.grid[0] = 1;
1707 grid.grid[1] = 1;
1708 grid.grid[2] = 1;
1709
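	/* Build the "config" bit field consumed by the result shader. As set
	 * here and in the loop below: 1 = read the previously accumulated
	 * value, 2 = write the accumulated value for chaining, 4 = report
	 * availability instead of the result (index < 0), 8 = reduce the
	 * result to a boolean, 16 = read only the last timestamp, 32 = apply
	 * the timestamp-to-ns conversion, 64 = store a full 64-bit result,
	 * 128 = clamp to signed 32 bits, 256 = stream-overflow pair handling.
	 */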
1710 consts.config = 0;
1711 if (index < 0)
1712 consts.config |= 4;
1713 if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE)
1714 consts.config |= 8;
1715 else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
1716 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
1717 consts.config |= 8 | 256;
1718 else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
1719 query->b.type == PIPE_QUERY_TIME_ELAPSED)
1720 consts.config |= 32;
1721
1722 switch (result_type) {
1723 case PIPE_QUERY_TYPE_U64:
1724 case PIPE_QUERY_TYPE_I64:
1725 consts.config |= 64;
1726 break;
1727 case PIPE_QUERY_TYPE_I32:
1728 consts.config |= 128;
1729 break;
1730 case PIPE_QUERY_TYPE_U32:
1731 break;
1732 }
1733
1734 rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;
1735
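	/* Walk the chain from the most recent buffer back to the oldest.
	 * Intermediate dispatches accumulate into tmp_buffer; the final
	 * dispatch, for the buffer with no predecessor, writes to the
	 * caller's resource.
	 */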
1736 for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
1737 if (query->b.type != PIPE_QUERY_TIMESTAMP) {
1738 qbuf_prev = qbuf->previous;
1739 consts.result_count = qbuf->results_end / query->result_size;
1740 consts.config &= ~3;
1741 if (qbuf != &query->buffer)
1742 consts.config |= 1;
1743 if (qbuf->previous)
1744 consts.config |= 2;
1745 } else {
1746 /* Only read the last timestamp. */
1747 qbuf_prev = NULL;
1748 consts.result_count = 0;
1749 consts.config |= 16;
1750 params.start_offset += qbuf->results_end - query->result_size;
1751 }
1752
1753 rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
1754
1755 ssbo[0].buffer = &qbuf->buf->b.b;
1756 ssbo[0].buffer_offset = params.start_offset;
1757 ssbo[0].buffer_size = qbuf->results_end - params.start_offset;
1758
1759 if (!qbuf->previous) {
1760 ssbo[2].buffer = resource;
1761 ssbo[2].buffer_offset = offset;
1762 ssbo[2].buffer_size = 8;
1763
1764 ((struct r600_resource *)resource)->TC_L2_dirty = true;
1765 }
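		/* TC_L2_dirty set above records that the destination was
		 * written through the shader L2 cache, so later CP DMA or
		 * SDMA reads know to flush it first.
		 */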
1766
1767 rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
1768
1769 if (wait && qbuf == &query->buffer) {
1770 uint64_t va;
1771
1772 /* Wait for result availability. Wait only for readiness
1773 * of the last entry, since the fence writes should be
1774 * serialized in the CP.
1775 */
1776 va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
1777 va += params.fence_offset;
1778
1779 r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
1780 }
1781
1782 rctx->b.launch_grid(&rctx->b, &grid);
1783 rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
1784 }
1785
1786 r600_restore_qbo_state(rctx, &saved_state);
1787 pipe_resource_reference(&tmp_buffer, NULL);
1788 }
1789
1790 static void r600_render_condition(struct pipe_context *ctx,
1791 struct pipe_query *query,
1792 boolean condition,
1793 enum pipe_render_cond_flag mode)
1794 {
1795 struct r600_common_context *rctx = (struct r600_common_context *)ctx;
1796 struct r600_query_hw *rquery = (struct r600_query_hw *)query;
1797 struct r600_query_buffer *qbuf;
1798 struct r600_atom *atom = &rctx->render_cond_atom;
1799
1800 /* Compute the size of SET_PREDICATION packets. */
1801 atom->num_dw = 0;
1802 if (query) {
1803 bool needs_workaround = false;
1804
1805 /* There is a firmware regression in VI which causes successive
1806 * SET_PREDICATION packets to give the wrong answer for
1807 * non-inverted stream overflow predication.
1808 */
1809 if (rctx->chip_class >= VI && !condition &&
1810 (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
1811 (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
1812 (rquery->buffer.previous ||
1813 rquery->buffer.results_end > rquery->result_size)))) {
1814 needs_workaround = true;
1815 }
1816
1817 if (needs_workaround && !rquery->workaround_buf) {
1818 bool old_force_off = rctx->render_cond_force_off;
1819 rctx->render_cond_force_off = true;
1820
1821 u_suballocator_alloc(
1822 rctx->allocator_zeroed_memory, 8, 8,
1823 &rquery->workaround_offset,
1824 (struct pipe_resource **)&rquery->workaround_buf);
1825
 1826                         /* Reset to NULL so that launching the compute grid
 1827                          * below does not re-emit a redundant SET_PREDICATION
 1828                          * packet. */
1829 rctx->render_cond = NULL;
1830
1831 ctx->get_query_result_resource(
1832 ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
1833 &rquery->workaround_buf->b.b, rquery->workaround_offset);
1834
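			/* A single SET_PREDICATION pointing at the 8-byte
			 * workaround result is enough here; 5 dwords matches
			 * the per-packet estimate used by the sizing loop
			 * below.
			 */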
1835 atom->num_dw = 5;
1836
1837 rctx->render_cond_force_off = old_force_off;
1838 } else {
1839 for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
1840 atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
1841
1842 if (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
1843 atom->num_dw *= R600_MAX_STREAMS;
1844 }
1845 }
1846
1847 rctx->render_cond = query;
1848 rctx->render_cond_invert = condition;
1849 rctx->render_cond_mode = mode;
1850
1851 rctx->set_atom_dirty(rctx, atom, query != NULL);
1852 }
1853
1854 void r600_suspend_queries(struct r600_common_context *ctx)
1855 {
1856 struct r600_query_hw *query;
1857
1858 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1859 r600_query_hw_emit_stop(ctx, query);
1860 }
1861 assert(ctx->num_cs_dw_queries_suspend == 0);
1862 }
1863
1864 static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
1865 struct list_head *query_list)
1866 {
1867 struct r600_query_hw *query;
1868 unsigned num_dw = 0;
1869
1870 LIST_FOR_EACH_ENTRY(query, query_list, list) {
1871 /* begin + end */
1872 num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
1873
1874 /* Workaround for the fact that
1875 * num_cs_dw_nontimer_queries_suspend is incremented for every
1876 * resumed query, which raises the bar in need_cs_space for
1877 * queries about to be resumed.
1878 */
1879 num_dw += query->num_cs_dw_end;
1880 }
 1881         /* streamout enable update, needed by the primitives generated query */
1882 num_dw += ctx->streamout.enable_atom.num_dw;
1883 /* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
1884 num_dw += 13;
1885
1886 return num_dw;
1887 }
1888
1889 void r600_resume_queries(struct r600_common_context *ctx)
1890 {
1891 struct r600_query_hw *query;
1892 unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
1893
1894 assert(ctx->num_cs_dw_queries_suspend == 0);
1895
1896 /* Check CS space here. Resuming must not be interrupted by flushes. */
1897 ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
1898
1899 LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
1900 r600_query_hw_emit_start(ctx, query);
1901 }
1902 }
1903
1904 /* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
1905 void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
1906 {
1907 struct r600_common_context *ctx =
1908 (struct r600_common_context*)rscreen->aux_context;
1909 struct radeon_winsys_cs *cs = ctx->gfx.cs;
1910 struct r600_resource *buffer;
1911 uint32_t *results;
1912 unsigned i, mask = 0;
1913 unsigned max_rbs = ctx->screen->info.num_render_backends;
1914
1915 assert(rscreen->chip_class <= CAYMAN);
1916
 1917         /* Use the backend map if the kernel reports a valid one. */
1918 if (rscreen->info.r600_gb_backend_map_valid) {
1919 unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
1920 unsigned backend_map = rscreen->info.r600_gb_backend_map;
1921 unsigned item_width, item_mask;
1922
1923 if (ctx->chip_class >= EVERGREEN) {
1924 item_width = 4;
1925 item_mask = 0x7;
1926 } else {
1927 item_width = 2;
1928 item_mask = 0x3;
1929 }
1930
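		/* Worked example with a made-up map: backend_map = 0x321 and
		 * item_width = 4 decode to RB indices 1, 2 and 3, so the loop
		 * below accumulates mask = 0b1110.
		 */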
1931 while (num_tile_pipes--) {
1932 i = backend_map & item_mask;
1933 mask |= (1<<i);
1934 backend_map >>= item_width;
1935 }
1936 if (mask != 0) {
1937 rscreen->info.enabled_rb_mask = mask;
1938 return;
1939 }
1940 }
1941
 1942         /* Otherwise fall back to probing the backends with a ZPASS_DONE event on older kernels. */
1943
1944 /* create buffer for event data */
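	/* 16 bytes are reserved per RB: ZPASS_DONE writes each backend's
	 * 64-bit result at a 16-byte stride, and only the first qword of
	 * each slot is inspected below.
	 */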
1945 buffer = (struct r600_resource*)
1946 pipe_buffer_create(ctx->b.screen, 0,
1947 PIPE_USAGE_STAGING, max_rbs * 16);
1948 if (!buffer)
1949 return;
1950
1951 /* initialize buffer with zeroes */
1952 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
1953 if (results) {
1954 memset(results, 0, max_rbs * 4 * 4);
1955
1956 /* emit EVENT_WRITE for ZPASS_DONE */
1957 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
1958 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
1959 radeon_emit(cs, buffer->gpu_address);
1960 radeon_emit(cs, buffer->gpu_address >> 32);
1961
1962 r600_emit_reloc(ctx, &ctx->gfx, buffer,
1963 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
1964
1965 /* analyze results */
1966 results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
1967 if (results) {
 1968                         for (i = 0; i < max_rbs; i++) {
 1969                                 /* An enabled backend sets at least the highest bit of its result, so a non-zero upper dword means the RB is active. */
1970 if (results[i*4 + 1])
1971 mask |= (1<<i);
1972 }
1973 }
1974 }
1975
1976 r600_resource_reference(&buffer, NULL);
1977
1978 if (mask)
1979 rscreen->info.enabled_rb_mask = mask;
1980 }
1981
1982 #define XFULL(name_, query_type_, type_, result_type_, group_id_) \
1983 { \
1984 .name = name_, \
1985 .query_type = R600_QUERY_##query_type_, \
1986 .type = PIPE_DRIVER_QUERY_TYPE_##type_, \
1987 .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
1988 .group_id = group_id_ \
1989 }
1990
1991 #define X(name_, query_type_, type_, result_type_) \
1992 XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
1993
1994 #define XG(group_, name_, query_type_, type_, result_type_) \
1995 XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
1996
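/* For example, X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) expands to a
 * pipe_driver_query_info initializer with query_type =
 * R600_QUERY_DRAW_CALLS, type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 * result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE and
 * group_id = ~0 (no group).
 */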
1997 static struct pipe_driver_query_info r600_driver_query_list[] = {
1998 X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
1999 X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
2000 X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
2001 X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
2002 X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
2003 X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
2004 X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
2005 X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
2006 X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
2007 X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
2008 X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
2009 X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
2010 X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
2011 X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
2012 X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
2013 X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
2014 X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
2015 X("num-L2-invalidates", NUM_L2_INVALIDATES, UINT64, AVERAGE),
2016 X("num-L2-writebacks", NUM_L2_WRITEBACKS, UINT64, AVERAGE),
2017 X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
2018 X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
2019 X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
2020 X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
2021 X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
2022 X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
2023 X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
2024 X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
2025 X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
2026 X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
2027 X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
2028 X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
2029 X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
2030 X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
2031 X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
2032 X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
2033 X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
2034 X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
2035 X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
2036 X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
2037 X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
2038 X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
2039
2040 /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
 2041          * which use them as a fallback path to detect the GPU type.
2042 *
2043 * Note: The names of these queries are significant for GPUPerfStudio
2044 * (and possibly their order as well). */
2045 XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
2046 XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
2047 XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
2048 XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
2049 XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),
2050
2051 X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
2052 X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
2053 X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),
2054
2055 /* The following queries must be at the end of the list because their
2056 * availability is adjusted dynamically based on the DRM version. */
2057 X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
2058 X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
2059 X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
2060 X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
2061 X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
2062 X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
2063 X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
2064 X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
2065 X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
2066 X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
2067 X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
2068 X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
2069 X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
2070 X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
2071 X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
2072 X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
2073 X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
2074 X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
2075 X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
2076 X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
2077 X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
2078 X("GPU-ce-busy", GPU_CE_BUSY, UINT64, AVERAGE),
2079 };
2080
2081 #undef X
2082 #undef XG
2083 #undef XFULL
2084
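/* Trim the tail of r600_driver_query_list when the kernel can't back it:
 * the last 25 entries (everything from "temperature" on) need the newer
 * winsys query support, and on amdgpu the last 7 ("GPU-pfp-busy" through
 * "GPU-ce-busy") are only exposed on VI and later.
 */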
2085 static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
2086 {
2087 if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
2088 return ARRAY_SIZE(r600_driver_query_list);
2089 else if (rscreen->info.drm_major == 3) {
2090 if (rscreen->chip_class >= VI)
2091 return ARRAY_SIZE(r600_driver_query_list);
2092 else
2093 return ARRAY_SIZE(r600_driver_query_list) - 7;
 2094         } else {
 2095                 return ARRAY_SIZE(r600_driver_query_list) - 25;
 2096         }
2097 }
2098
2099 static int r600_get_driver_query_info(struct pipe_screen *screen,
2100 unsigned index,
2101 struct pipe_driver_query_info *info)
2102 {
2103 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
2104 unsigned num_queries = r600_get_num_queries(rscreen);
2105
2106 if (!info) {
2107 unsigned num_perfcounters =
2108 r600_get_perfcounter_info(rscreen, 0, NULL);
2109
2110 return num_queries + num_perfcounters;
2111 }
2112
2113 if (index >= num_queries)
2114 return r600_get_perfcounter_info(rscreen, index - num_queries, info);
2115
2116 *info = r600_driver_query_list[index];
2117
2118 switch (info->query_type) {
2119 case R600_QUERY_REQUESTED_VRAM:
2120 case R600_QUERY_VRAM_USAGE:
2121 case R600_QUERY_MAPPED_VRAM:
2122 info->max_value.u64 = rscreen->info.vram_size;
2123 break;
2124 case R600_QUERY_REQUESTED_GTT:
2125 case R600_QUERY_GTT_USAGE:
2126 case R600_QUERY_MAPPED_GTT:
2127 info->max_value.u64 = rscreen->info.gart_size;
2128 break;
2129 case R600_QUERY_GPU_TEMPERATURE:
2130 info->max_value.u64 = 125;
2131 break;
2132 case R600_QUERY_VRAM_VIS_USAGE:
2133 info->max_value.u64 = rscreen->info.vram_vis_size;
2134 break;
2135 }
2136
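	/* Shift software query group IDs past the perfcounter groups,
	 * matching the numbering used by r600_get_driver_query_group_info()
	 * below.
	 */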
2137 if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
2138 info->group_id += rscreen->perfcounters->num_groups;
2139
2140 return 1;
2141 }
2142
2143 /* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
2144 * performance counter groups, so be careful when changing this and related
2145 * functions.
2146 */
2147 static int r600_get_driver_query_group_info(struct pipe_screen *screen,
2148 unsigned index,
2149 struct pipe_driver_query_group_info *info)
2150 {
2151 struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
2152 unsigned num_pc_groups = 0;
2153
2154 if (rscreen->perfcounters)
2155 num_pc_groups = rscreen->perfcounters->num_groups;
2156
2157 if (!info)
2158 return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
2159
2160 if (index < num_pc_groups)
2161 return r600_get_perfcounter_group_info(rscreen, index, info);
2162
2163 index -= num_pc_groups;
2164 if (index >= R600_NUM_SW_QUERY_GROUPS)
2165 return 0;
2166
2167 info->name = "GPIN";
2168 info->max_active_queries = 5;
2169 info->num_queries = 5;
2170 return 1;
2171 }
2172
2173 void r600_query_init(struct r600_common_context *rctx)
2174 {
2175 rctx->b.create_query = r600_create_query;
2176 rctx->b.create_batch_query = r600_create_batch_query;
2177 rctx->b.destroy_query = r600_destroy_query;
2178 rctx->b.begin_query = r600_begin_query;
2179 rctx->b.end_query = r600_end_query;
2180 rctx->b.get_query_result = r600_get_query_result;
2181 rctx->b.get_query_result_resource = r600_get_query_result_resource;
2182 rctx->render_cond_atom.emit = r600_emit_query_predication;
2183
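	/* Render condition relies on occlusion/overflow query results from
	 * the render backends, so the hook is only installed when the chip
	 * reports at least one RB.
	 */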
2184 if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
2185 rctx->b.render_condition = r600_render_condition;
2186
2187 LIST_INITHEAD(&rctx->active_queries);
2188 }
2189
2190 void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
2191 {
2192 rscreen->b.get_driver_query_info = r600_get_driver_query_info;
2193 rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
2194 }