/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"
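
/* Query-type classification helpers.
 *
 * Timer queries keep running across a CS flush, while all other
 * ("nontimer") queries are suspended before the flush and resumed in the
 * next CS; see r600_suspend_nontimer_queries() and
 * r600_resume_nontimer_queries() below. */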
static bool r600_is_timer_query(unsigned type)
{
        return type == PIPE_QUERY_TIME_ELAPSED ||
               type == PIPE_QUERY_TIMESTAMP ||
               type == PIPE_QUERY_TIMESTAMP_DISJOINT;
}

static bool r600_query_needs_begin(unsigned type)
{
        return type != PIPE_QUERY_GPU_FINISHED &&
               type != PIPE_QUERY_TIMESTAMP;
}
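
/* Allocate a buffer for the GPU to write query results into.
 *
 * For occlusion queries, each result slot holds a {begin, end} pair of
 * 64-bit counters per DB (16 bytes per DB). The GPU sets the top bit of a
 * counter's high dword once the counter has been written, so those bits
 * are pre-set here for disabled backends, which would otherwise never be
 * marked as ready (see the test_status_bit check in
 * r600_query_read_result() below). */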
static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, unsigned type)
{
        unsigned j, i, num_results, buf_size = 4096;
        uint32_t *results;
        /* Queries are normally read by the CPU after
         * being written by the gpu, hence staging is probably a good
         * usage pattern.
         */
        struct r600_resource *buf = (struct r600_resource*)
                pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
                                   PIPE_USAGE_STAGING, buf_size);

        switch (type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                results = r600_buffer_mmap_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
                memset(results, 0, buf_size);

                /* Set top bits for unused backends. */
                num_results = buf_size / (16 * ctx->max_db);
                for (j = 0; j < num_results; j++) {
                        for (i = 0; i < ctx->max_db; i++) {
                                if (!(ctx->backend_mask & (1<<i))) {
                                        results[(i * 4)+1] = 0x80000000;
                                        results[(i * 4)+3] = 0x80000000;
                                }
                        }
                        results += 4 * ctx->max_db;
                }
                ctx->ws->buffer_unmap(buf->cs_buf);
                break;
        case PIPE_QUERY_TIME_ELAPSED:
        case PIPE_QUERY_TIMESTAMP:
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                results = r600_buffer_mmap_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
                memset(results, 0, buf_size);
                ctx->ws->buffer_unmap(buf->cs_buf);
                break;
        default:
                assert(0);
        }
        return buf;
}
static void r600_update_occlusion_query_state(struct r600_context *rctx,
                                              unsigned type, int diff)
{
        if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
            type == PIPE_QUERY_OCCLUSION_PREDICATE) {
                bool enable;

                rctx->num_occlusion_queries += diff;
                assert(rctx->num_occlusion_queries >= 0);

                enable = rctx->num_occlusion_queries != 0;

                if (rctx->db_misc_state.occlusion_query_enabled != enable) {
                        rctx->db_misc_state.occlusion_query_enabled = enable;
                        rctx->db_misc_state.atom.dirty = true;
                }
        }
}
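
/* Emit the "begin" half of a query into the gfx CS. If the next result
 * slot would overflow the current buffer, a fresh buffer is allocated and
 * the old one is kept on the query's "previous" chain. Each write is
 * followed by a NOP packet carrying the buffer relocation. */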
static void r600_emit_query_begin(struct r600_context *ctx, struct r600_query *query)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
        uint64_t va;

        r600_update_occlusion_query_state(ctx, query->type, 1);
        r600_need_cs_space(ctx, query->num_cs_dw * 2, TRUE);

        /* Get a new query buffer if needed. */
        if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
                struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
                *qbuf = query->buffer;
                query->buffer.buf = r600_new_query_buffer(ctx, query->type);
                query->buffer.results_end = 0;
                query->buffer.previous = qbuf;
        }

        /* emit begin query */
        va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf);
        va += query->buffer.results_end;

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
                cs->buf[cs->cdw++] = va;
                cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
                cs->buf[cs->cdw++] = va;
                cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
                cs->buf[cs->cdw++] = va;
                cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
                cs->buf[cs->cdw++] = 0;
                cs->buf[cs->cdw++] = 0;
                break;
        default:
                assert(0);
        }
        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
        cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE);

        if (!r600_is_timer_query(query->type)) {
                ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
        }
}
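
/* Emit the "end" half of a query and advance results_end past the slot
 * just written. Queries without a begin (such as TIMESTAMP) reserve their
 * CS space here, since they never went through r600_emit_query_begin(). */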
static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *query)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
        uint64_t va;

        /* The queries which need begin already called this in begin_query. */
        if (!r600_query_needs_begin(query->type)) {
                r600_need_cs_space(ctx, query->num_cs_dw, FALSE);
        }

        va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf);

        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                va += query->buffer.results_end + 8;
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
                cs->buf[cs->cdw++] = va;
                cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
                cs->buf[cs->cdw++] = query->buffer.results_end + query->result_size/2;
                cs->buf[cs->cdw++] = 0;
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                va += query->buffer.results_end + query->result_size/2;
                /* fall through */
        case PIPE_QUERY_TIMESTAMP:
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
                cs->buf[cs->cdw++] = va;
                cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
                cs->buf[cs->cdw++] = 0;
                cs->buf[cs->cdw++] = 0;
                break;
        default:
                assert(0);
        }
        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
        cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE);

        query->buffer.results_end += query->result_size;

        if (r600_query_needs_begin(query->type)) {
                if (!r600_is_timer_query(query->type)) {
                        ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
                }
        }

        r600_update_occlusion_query_state(ctx, query->type, -1);
}
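
/* Emit SET_PREDICATION packets for a render-condition query. One packet is
 * emitted per result slot across all chained buffers, and the CONTINUE
 * hint is ORed into the operation for later blocks so they accumulate
 * into a single predicate rather than replacing it. */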
static void r600_emit_query_predication(struct r600_context *ctx, struct r600_query *query,
                                        int operation, bool flag_wait)
{
        struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

        if (operation == PREDICATION_OP_CLEAR) {
                r600_need_cs_space(ctx, 3, FALSE);

                cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
                cs->buf[cs->cdw++] = 0;
                cs->buf[cs->cdw++] = PRED_OP(PREDICATION_OP_CLEAR);
        } else {
                struct r600_query_buffer *qbuf;
                unsigned count;
                uint32_t op;

                /* Find how many results there are. */
                count = 0;
                for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
                        count += qbuf->results_end / query->result_size;
                }

                r600_need_cs_space(ctx, 5 * count, TRUE);

                op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE |
                     (flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW);

                /* emit predicate packets for all data blocks */
                for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
                        unsigned results_base = 0;
                        uint64_t va = r600_resource_va(&ctx->screen->screen, &qbuf->buf->b.b);

                        while (results_base < qbuf->results_end) {
                                cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
                                cs->buf[cs->cdw++] = (va + results_base) & 0xFFFFFFFFUL;
                                cs->buf[cs->cdw++] = op | (((va + results_base) >> 32UL) & 0xFF);
                                cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                                cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, qbuf->buf, RADEON_USAGE_READ);
                                results_base += query->result_size;
                        }
                        /* set CONTINUE bit for all packets except the first */
                        op |= PREDICATION_CONTINUE;
                }
        }
}
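
/* result_size is the footprint of one begin/end emission in the query
 * buffer: a {begin, end} counter pair per DB for occlusion queries, two
 * 64-bit timestamps for TIME_ELAPSED (a single one for TIMESTAMP), and
 * begin/end SAMPLE_STREAMOUTSTATS blocks for the streamout queries.
 * num_cs_dw is the CS space one emission takes. */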
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_query *query;

        query = CALLOC_STRUCT(r600_query);
        if (query == NULL)
                return NULL;

        query->type = query_type;

        switch (query_type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                query->result_size = 16 * rctx->max_db;
                query->num_cs_dw = 6;
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                query->result_size = 16;
                query->num_cs_dw = 8;
                break;
        case PIPE_QUERY_TIMESTAMP:
                query->result_size = 8;
                query->num_cs_dw = 8;
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
                query->result_size = 32;
                query->num_cs_dw = 6;
                break;
        default:
                assert(0);
                FREE(query);
                return NULL;
        }

        query->buffer.buf = r600_new_query_buffer(rctx, query_type);
        if (!query->buffer.buf) {
                FREE(query);
                return NULL;
        }
        return (struct pipe_query*)query;
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
        struct r600_query *rquery = (struct r600_query *)query;
        struct r600_query_buffer *prev = rquery->buffer.previous;

        /* Release all query buffers. */
        while (prev) {
                struct r600_query_buffer *qbuf = prev;
                prev = prev->previous;
                pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
                FREE(qbuf);
        }

        pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
        FREE(query);
}
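
/* pipe_context::begin_query. Results from any previous begin/end cycle
 * are discarded, and the result buffer is replaced up front if reusing it
 * would stall on GPU work that still references it. */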
static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;
        struct r600_query_buffer *prev = rquery->buffer.previous;

        if (!r600_query_needs_begin(rquery->type)) {
                assert(0);
                return;
        }

        /* Discard the old query buffers. */
        while (prev) {
                struct r600_query_buffer *qbuf = prev;
                prev = prev->previous;
                pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
                FREE(qbuf);
        }

        /* Obtain a new buffer if the current one can't be mapped without a stall. */
        if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
            rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
                pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
                rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
        }

        rquery->buffer.results_end = 0;
        rquery->buffer.previous = NULL;

        r600_emit_query_begin(rctx, rquery);

        if (!r600_is_timer_query(rquery->type)) {
                LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
        }
}
static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;

        r600_emit_query_end(rctx, rquery);

        if (r600_query_needs_begin(rquery->type) && !r600_is_timer_query(rquery->type)) {
                LIST_DELINIT(&rquery->list);
        }
}
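
/* Combine one {begin, end} counter pair into a delta. With
 * test_status_bit, the pair only counts once the GPU has set the top bit
 * of both values; unfinished pairs contribute zero, which lets partially
 * written buffers be summed safely. */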
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
                                       bool test_status_bit)
{
        uint32_t *current_result = (uint32_t*)map;
        uint64_t start, end;

        start = (uint64_t)current_result[start_index] |
                (uint64_t)current_result[start_index+1] << 32;
        end = (uint64_t)current_result[end_index] |
              (uint64_t)current_result[end_index+1] << 32;

        if (!test_status_bit ||
            ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
                return end - start;
        }
        return 0;
}
static boolean r600_get_query_buffer_result(struct r600_context *ctx,
                                            struct r600_query *query,
                                            struct r600_query_buffer *qbuf,
                                            boolean wait,
                                            union pipe_query_result *result)
{
        unsigned results_base = 0;
        char *map;

        map = r600_buffer_mmap_sync_with_rings(ctx, qbuf->buf,
                                               PIPE_TRANSFER_READ |
                                               (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
        if (!map)
                return FALSE;

        /* count all results across all data blocks */
        switch (query->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
                while (results_base != qbuf->results_end) {
                        result->u64 +=
                                r600_query_read_result(map + results_base, 0, 2, true);
                        results_base += 16;
                }
                break;
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                while (results_base != qbuf->results_end) {
                        result->b = result->b ||
                                r600_query_read_result(map + results_base, 0, 2, true) != 0;
                        results_base += 16;
                }
                break;
        case PIPE_QUERY_TIME_ELAPSED:
                while (results_base != qbuf->results_end) {
                        result->u64 +=
                                r600_query_read_result(map + results_base, 0, 2, false);
                        results_base += query->result_size;
                }
                break;
        case PIPE_QUERY_TIMESTAMP:
        {
                uint32_t *current_result = (uint32_t*)map;
                result->u64 = (uint64_t)current_result[0] |
                              (uint64_t)current_result[1] << 32;
                break;
        }
        case PIPE_QUERY_PRIMITIVES_EMITTED:
                /* SAMPLE_STREAMOUTSTATS stores this structure:
                 * {
                 *    u64 NumPrimitivesWritten;
                 *    u64 PrimitiveStorageNeeded;
                 * }
                 * We only need NumPrimitivesWritten here. */
                while (results_base != qbuf->results_end) {
                        result->u64 +=
                                r600_query_read_result(map + results_base, 2, 6, true);
                        results_base += query->result_size;
                }
                break;
        case PIPE_QUERY_PRIMITIVES_GENERATED:
                /* Here we read PrimitiveStorageNeeded. */
                while (results_base != qbuf->results_end) {
                        result->u64 +=
                                r600_query_read_result(map + results_base, 0, 4, true);
                        results_base += query->result_size;
                }
                break;
        case PIPE_QUERY_SO_STATISTICS:
                while (results_base != qbuf->results_end) {
                        result->so_statistics.num_primitives_written +=
                                r600_query_read_result(map + results_base, 2, 6, true);
                        result->so_statistics.primitives_storage_needed +=
                                r600_query_read_result(map + results_base, 0, 4, true);
                        results_base += query->result_size;
                }
                break;
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                while (results_base != qbuf->results_end) {
                        result->b = result->b ||
                                r600_query_read_result(map + results_base, 2, 6, true) !=
                                r600_query_read_result(map + results_base, 0, 4, true);
                        results_base += query->result_size;
                }
                break;
        default:
                assert(0);
        }

        ctx->ws->buffer_unmap(qbuf->buf->cs_buf);
        return TRUE;
}
static boolean r600_get_query_result(struct pipe_context *ctx,
                                     struct pipe_query *query,
                                     boolean wait, union pipe_query_result *result)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;
        struct r600_query_buffer *qbuf;

        util_query_clear_result(result, rquery->type);

        for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
                if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
                        return FALSE;
                }
        }

        /* Convert the time to expected units. */
        if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
            rquery->type == PIPE_QUERY_TIMESTAMP) {
                result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
        }
        return TRUE;
}
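
/* pipe_context::render_condition. A NULL query clears any active
 * predication; otherwise occlusion queries predicate rendering on ZPASS
 * results and streamout queries on primitive counts, waiting for results
 * only in the *_WAIT modes. */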
static void r600_render_condition(struct pipe_context *ctx,
                                  struct pipe_query *query,
                                  uint mode)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_query *rquery = (struct r600_query *)query;
        bool wait_flag = false;

        rctx->current_render_cond = query;
        rctx->current_render_cond_mode = mode;

        if (query == NULL) {
                if (rctx->predicate_drawing) {
                        rctx->predicate_drawing = false;
                        r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
                }
                return;
        }

        if (mode == PIPE_RENDER_COND_WAIT ||
            mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
                wait_flag = true;
        }

        rctx->predicate_drawing = true;

        switch (rquery->type) {
        case PIPE_QUERY_OCCLUSION_COUNTER:
        case PIPE_QUERY_OCCLUSION_PREDICATE:
                r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
                break;
        case PIPE_QUERY_PRIMITIVES_EMITTED:
        case PIPE_QUERY_PRIMITIVES_GENERATED:
        case PIPE_QUERY_SO_STATISTICS:
        case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
                r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
                break;
        default:
                assert(0);
        }
}
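
/* Suspend/resume bracket a CS flush: every active nontimer query gets an
 * "end" emitted into the old CS and a matching "begin" into the new one.
 * num_cs_dw_nontimer_queries_suspend asserts that the bookkeeping
 * balances across the pair. */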
void r600_suspend_nontimer_queries(struct r600_context *ctx)
{
        struct r600_query *query;

        LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
                r600_emit_query_end(ctx, query);
        }
        assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
}

void r600_resume_nontimer_queries(struct r600_context *ctx)
{
        struct r600_query *query;

        assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);

        LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
                r600_emit_query_begin(ctx, query);
        }
}

void r600_init_query_functions(struct r600_context *rctx)
{
        rctx->context.create_query = r600_create_query;
        rctx->context.destroy_query = r600_destroy_query;
        rctx->context.begin_query = r600_begin_query;
        rctx->context.end_query = r600_end_query;
        rctx->context.get_query_result = r600_get_query_result;

        if (rctx->screen->info.r600_num_backends > 0)
                rctx->context.render_condition = r600_render_condition;
}