/* src/gallium/drivers/r600/r600_query.c */
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"
#include "r600_hw_context_priv.h"

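/* Timer queries (timestamps, elapsed time) are kept on a separate active list
 * from the other query types, so that the two groups can be suspended and
 * resumed independently around command stream flushes. */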
static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP ||
	       type == PIPE_QUERY_TIMESTAMP_DISJOINT;
}

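/* Allocate a 4 KB staging buffer for query results written by the GPU.
 * Occlusion results are stored as one 16-byte {begin, end} ZPASS pair per DB;
 * backends missing from backend_mask never write their pair, so their status
 * bits (bit 31 of the high dwords) are pre-set here to make those slots look
 * complete to r600_query_read_result. */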
static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;
	/* Queries are normally read by the CPU after
	 * being written by the gpu, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = ctx->ws->buffer_map(buf->buf, ctx->cs, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		ctx->ws->buffer_unmap(buf->buf);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		results = ctx->ws->buffer_map(buf->buf, ctx->cs, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		ctx->ws->buffer_unmap(buf->buf);
		break;
	default:
		assert(0);
	}
	return buf;
}

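/* Emit the "begin" half of a query. If the current results buffer is full,
 * a new one is allocated and the old one is chained behind it, so one query
 * can accumulate results across several buffers. */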
static void r600_emit_query_begin(struct r600_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va;

	r600_need_cs_space(ctx, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf);
	va += query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;
		break;
	default:
		assert(0);
	}
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);

	if (r600_is_timer_query(query->type)) {
		ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
	} else {
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
	}
}

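/* Emit the "end" half of a query. The end value lands in the second half of
 * the current result slot (offset 8 within each DB pair of an occlusion
 * query, result_size/2 for the others), and results_end then advances to the
 * next free slot. */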
static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va;

	va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf);
	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;
		break;
	default:
		assert(0);
	}
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);

	query->buffer.results_end += query->result_size;

	if (r600_is_timer_query(query->type)) {
		ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
	} else {
		ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
	}
}

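/* Program hardware predication from previously written query results: either
 * clear predication, or emit one SET_PREDICATION packet per result slot in
 * every chained buffer. The CONTINUE bit is set on all but the first packet
 * so the hardware combines the partial results. */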
static void r600_emit_query_predication(struct r600_context *ctx, struct r600_query *query,
					int operation, bool flag_wait)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	if (operation == PREDICATION_OP_CLEAR) {
		r600_need_cs_space(ctx, 3, FALSE);

		cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = PRED_OP(PREDICATION_OP_CLEAR);
	} else {
		struct r600_query_buffer *qbuf;
		unsigned count;
		uint32_t op;

		/* Find how many results there are. */
		count = 0;
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			count += qbuf->results_end / query->result_size;
		}

		r600_need_cs_space(ctx, 5 * count, TRUE);

		op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE |
		     (flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW);

		/* emit predicate packets for all data blocks */
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			unsigned results_base = 0;
			uint64_t va = r600_resource_va(&ctx->screen->screen, &qbuf->buf->b.b.b);

			while (results_base < qbuf->results_end) {
				cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
				cs->buf[cs->cdw++] = (va + results_base) & 0xFFFFFFFFUL;
				cs->buf[cs->cdw++] = op | (((va + results_base) >> 32UL) & 0xFF);
				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, qbuf->buf, RADEON_USAGE_READ);
				results_base += query->result_size;

				/* set CONTINUE bit for all packets except the first */
				op |= PREDICATION_CONTINUE;
			}
		}
	}
}

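/* Create a query object. result_size is the number of bytes one begin/end
 * pair occupies in the results buffer, and num_cs_dw is the command stream
 * space a single begin or end emission needs (including the NOP reloc). */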
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *query;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	query->buffer.buf = r600_new_query_buffer(rctx, query_type);
	if (!query->buffer.buf) {
		FREE(query);
		return NULL;
	}
	return (struct pipe_query*)query;
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(query);
}

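/* Track the number of active occlusion queries and flag the DB misc state
 * atom dirty whenever the count crosses zero, so the occlusion query enable
 * bit gets reprogrammed. */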
static void r600_update_occlusion_query_state(struct r600_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (rctx->atom_db_misc_state.occlusion_query_enabled != enable) {
			rctx->atom_db_misc_state.occlusion_query_enabled = enable;
			r600_atom_dirty(rctx, &rctx->atom_db_misc_state.atom);
		}
	}
}

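/* Begin a query: throw away results from any earlier begin/end pairs, swap in
 * a fresh results buffer if the current one is still in flight on the GPU,
 * and put the query on the matching active list so it is suspended and
 * resumed across command stream flushes. */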
static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rquery->buffer.buf->cs_buf) ||
	    rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_update_occlusion_query_state(rctx, rquery->type, 1);

	r600_emit_query_begin(rctx, rquery);

	if (r600_is_timer_query(rquery->type)) {
		LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
	} else {
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
	}
}

static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	r600_emit_query_end(rctx, rquery);
	LIST_DELINIT(&rquery->list);

	r600_update_occlusion_query_state(rctx, rquery->type, -1);
}

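/* Read one {begin, end} pair of 64-bit counters from a mapped results buffer
 * and return their difference. Indices are in 32-bit units. With
 * test_status_bit, a result only counts once the top bit of both values is
 * set, i.e. once the hardware has actually written them (the bit is pre-set
 * for disabled backends in r600_new_query_buffer). */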
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

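/* Accumulate the results stored in a single buffer of the query's chain into
 * *result. Returns FALSE when the buffer cannot be mapped without blocking
 * and waiting was not requested. */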
static boolean r600_get_query_buffer_result(struct r600_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union r600_query_result *result)
{
	unsigned results_base = 0;
	char *map;

	map = ctx->ws->buffer_map(qbuf->buf->buf, ctx->cs,
				  PIPE_TRANSFER_READ |
				  (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	default:
		assert(0);
	}

	ctx->ws->buffer_unmap(qbuf->buf->buf);
	return TRUE;
}

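/* Gather the result from every buffer in the chain and convert it to the
 * units the state tracker expects, e.g. PIPE_QUERY_TIME_ELAPSED is returned
 * in nanoseconds. */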
static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query,
				     boolean wait, void *vresult)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	boolean *result_b = (boolean*)vresult;
	uint64_t *result_u64 = (uint64_t*)vresult;
	union r600_query_result result;
	struct pipe_query_data_so_statistics *result_so =
		(struct pipe_query_data_so_statistics*)vresult;
	struct r600_query_buffer *qbuf;

	memset(&result, 0, sizeof(result));

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, &result)) {
			return FALSE;
		}
	}

	switch (rquery->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		*result_u64 = result.u64;
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		*result_b = result.b;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		*result_u64 = (1000000 * result.u64) / rctx->screen->info.r600_clock_crystal_freq;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		*result_so = result.so;
		break;
	default:
		assert(0);
	}
	return TRUE;
}

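/* pipe_context::render_condition hook: make subsequent draws conditional on a
 * query result via hardware predication. Occlusion queries use ZPASS
 * predication, streamout queries use PRIMCOUNT predication, and the WAIT hint
 * is chosen for the *_WAIT condition modes. */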
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  uint mode)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	bool wait_flag = false;

	rctx->current_render_cond = query;
	rctx->current_render_cond_mode = mode;

	if (query == NULL) {
		if (rctx->predicate_drawing) {
			rctx->predicate_drawing = false;
			r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
		}
		return;
	}

	if (mode == PIPE_RENDER_COND_WAIT ||
	    mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
		wait_flag = true;
	}

	rctx->predicate_drawing = true;

	switch (rquery->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
		break;
	default:
		assert(0);
	}
}

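/* Active queries may not stay open across a command stream flush, so the
 * context code suspends (ends) them before flushing and resumes (re-begins)
 * them afterwards. The num_cs_dw_*_queries_suspend counters track how much
 * command stream space those suspend calls will need. */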
void r600_suspend_nontimer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
}

void r600_resume_nontimer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_begin(ctx, query);
	}
}

void r600_suspend_timer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
		r600_emit_query_end(ctx, query);
	}

	assert(ctx->num_cs_dw_timer_queries_suspend == 0);
}

void r600_resume_timer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_timer_queries_suspend == 0);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
		r600_emit_query_begin(ctx, query);
	}
}

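/* Install the query entry points. render_condition is only exposed when the
 * winsys reports at least one backend (DB); otherwise it stays unset. */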
void r600_init_query_functions(struct r600_context *rctx)
{
	rctx->context.create_query = r600_create_query;
	rctx->context.destroy_query = r600_destroy_query;
	rctx->context.begin_query = r600_begin_query;
	rctx->context.end_query = r600_end_query;
	rctx->context.get_query_result = r600_get_query_result;

	if (rctx->screen->info.r600_num_backends > 0)
		rctx->context.render_condition = r600_render_condition;
}