gallium: add bool return to pipe_context::end_query
[mesa.git] / src / gallium / drivers / svga / svga_pipe_query.c
1 /**********************************************************
2 * Copyright 2008-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 #include "pipe/p_state.h"
27 #include "pipe/p_context.h"
28
29 #include "util/u_bitmask.h"
30 #include "util/u_memory.h"
31
32 #include "svga_cmd.h"
33 #include "svga_context.h"
34 #include "svga_screen.h"
35 #include "svga_resource_buffer.h"
36 #include "svga_winsys.h"
37 #include "svga_debug.h"
38
39
/* Fixme: want a public base class for all pipe structs, even if there
 * isn't much in them.
 */
struct pipe_query {
   int dummy;   /* placeholder: gallium treats pipe_query as an opaque type */
};
46
/** Driver-private query object wrapping gallium's opaque pipe_query */
struct svga_query {
   struct pipe_query base;
   unsigned type;                  /**< PIPE_QUERY_x or SVGA_QUERY_x */
   SVGA3dQueryType svga_type;      /**< SVGA3D_QUERYTYPE_x or unused */

   unsigned id;                    /**< Per-context query identifier */

   struct pipe_fence_handle *fence;

   /** For PIPE_QUERY_OCCLUSION_COUNTER / SVGA3D_QUERYTYPE_OCCLUSION */

   /* For VGPU9 */
   struct svga_winsys_buffer *hwbuf;
   /* Pinned result buffer, written asynchronously by the host; volatile
    * because the host updates it behind the guest's back.
    */
   volatile SVGA3dQueryResult *queryResult;

   /** For VGPU10 */
   struct svga_winsys_gb_query *gb_query;
   SVGA3dDXQueryFlags flags;
   unsigned offset;                /**< offset to the gb_query memory */
   struct pipe_query *predicate;   /**< The associated query that can be used for predicate */

   /** For non-GPU SVGA_QUERY_x queries: HUD counter snapshots */
   uint64_t begin_count, end_count;
};
71
72
/** Cast wrapper: gallium pipe_query -> driver-private svga_query */
static inline struct svga_query *
svga_query(struct pipe_query *q)
{
   struct svga_query *sq = (struct svga_query *) q;
   return sq;
}
79
/**
 * VGPU9
 */

/* Forward declaration: begin_query_vgpu9() needs to drain a still-pending
 * result through the public query-result path before restarting a query.
 */
static boolean
svga_get_query_result(struct pipe_context *pipe,
                      struct pipe_query *q,
                      boolean wait,
                      union pipe_query_result *result);
89
/**
 * Allocate the pinned buffer that will receive the VGPU9 query result
 * from the host, and initialize it to the NEW state.
 * \return PIPE_OK or PIPE_ERROR_OUT_OF_MEMORY
 */
static enum pipe_error
define_query_vgpu9(struct svga_context *svga,
                   struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;

   sq->hwbuf = svga_winsys_buffer_create(svga, 1,
                                         SVGA_BUFFER_USAGE_PINNED,
                                         sizeof *sq->queryResult);
   if (!sq->hwbuf)
      return PIPE_ERROR_OUT_OF_MEMORY;

   sq->queryResult = (SVGA3dQueryResult *)
                     sws->buffer_map(sws, sq->hwbuf, PIPE_TRANSFER_WRITE);
   if (!sq->queryResult) {
      sws->buffer_destroy(sws, sq->hwbuf);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   sq->queryResult->totalSize = sizeof *sq->queryResult;
   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;

   /* We request the buffer to be pinned and assume it is always mapped.
    * The reason is that we don't want to wait for fences when checking the
    * query status.
    */
   sws->buffer_unmap(sws, sq->hwbuf);

   return PIPE_OK;
}
120
/**
 * Start a VGPU9 query: consume any still-pending previous result, reset
 * the result buffer to NEW, drop the old fence, and emit BeginQuery.
 * \return PIPE_OK on success
 */
static enum pipe_error
begin_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret = PIPE_OK;

   if (sq->queryResult->state == SVGA3D_QUERYSTATE_PENDING) {
      /* The application doesn't care for the pending query result.
       * We cannot let go of the existing buffer and just get a new one
       * because its storage may be reused for other purposes and clobbered
       * by the host when it determines the query result. So the only
       * option here is to wait for the existing query's result -- not a
       * big deal, given that no sane application would do this.
       */
      uint64_t result;
      svga_get_query_result(&svga->pipe, &sq->base, TRUE, (void*)&result);
      assert(sq->queryResult->state != SVGA3D_QUERYSTATE_PENDING);
   }

   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
   sws->fence_reference(sws, &sq->fence, NULL);

   /* Emit the command; if the command buffer is full, flush and retry once. */
   ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
   }
   return ret;
}
150
/**
 * End a VGPU9 query: mark the result buffer PENDING (the host overwrites
 * it when the result becomes available) and emit the EndQuery command.
 */
static enum pipe_error
end_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
{
   enum pipe_error ret = PIPE_OK;

   /* Set to PENDING before sending EndQuery. */
   sq->queryResult->state = SVGA3D_QUERYSTATE_PENDING;

   /* Emit the command; if the command buffer is full, flush and retry once. */
   ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
   }
   return ret;
}
166
/**
 * Get the result of a VGPU9 query.
 * \return TRUE if the result is available, FALSE if !wait and the query
 *         is still pending on the host.
 */
static boolean
get_query_result_vgpu9(struct svga_context *svga, struct svga_query *sq,
                       boolean wait, uint64_t *result)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret;
   SVGA3dQueryState state;

   if (!sq->fence) {
      /* The query status won't be updated by the host unless
       * SVGA_3D_CMD_WAIT_FOR_QUERY is emitted. Unfortunately this will cause
       * a synchronous wait on the host.
       */
      ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
      if (ret != PIPE_OK) {
         /* Command buffer full: flush and retry once. */
         svga_context_flush(svga, NULL);
         ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
      }
      assert (ret == PIPE_OK);
      /* Flush now so we have a fence to wait on below. */
      svga_context_flush(svga, &sq->fence);
      assert(sq->fence);
   }

   state = sq->queryResult->state;
   if (state == SVGA3D_QUERYSTATE_PENDING) {
      if (!wait)
         return FALSE;
      /* Block until the command buffer containing EndQuery has executed,
       * then re-read the state the host wrote.
       */
      sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
      state = sq->queryResult->state;
   }

   assert(state == SVGA3D_QUERYSTATE_SUCCEEDED ||
          state == SVGA3D_QUERYSTATE_FAILED);

   *result = (uint64_t)sq->queryResult->result32;
   return TRUE;
}
204
205
206 /**
207 * VGPU10
208 *
209 * There is one query mob allocated for each context to be shared by all
 * query types. The mob is used to hold each query's state and result. Since
211 * each query result type is of different length, to ease the query allocation
212 * management, the mob is divided into memory blocks. Each memory block
213 * will hold queries of the same type. Multiple memory blocks can be allocated
214 * for a particular query type.
215 *
216 * Currently each memory block is of 184 bytes. We support up to 128
217 * memory blocks. The query memory size is arbitrary right now.
 * Each occlusion query takes about 8 bytes. One memory block can accommodate
219 * 23 occlusion queries. 128 of those blocks can support up to 2944 occlusion
220 * queries. That seems reasonable for now. If we think this limit is
221 * not enough, we can increase the limit or try to grow the mob in runtime.
222 * Note, SVGA device does not impose one mob per context for queries,
223 * we could allocate multiple mobs for queries; however, wddm KMD does not
224 * currently support that.
225 *
226 * Also note that the GL guest driver does not issue any of the
227 * following commands: DXMoveQuery, DXBindAllQuery & DXReadbackAllQuery.
228 */
229 #define SVGA_QUERY_MEM_BLOCK_SIZE (sizeof(SVGADXQueryResultUnion) * 2)
230 #define SVGA_QUERY_MEM_SIZE (128 * SVGA_QUERY_MEM_BLOCK_SIZE)
231
/** Book-keeping for one fixed-size memory block within the query mob */
struct svga_qmem_alloc_entry
{
   unsigned start_offset;               /* start offset of the memory block */
   unsigned block_index;                /* block index of the memory block */
   unsigned query_size;                 /* query size in this memory block */
   unsigned nquery;                     /* number of queries allocated */
   struct util_bitmask *alloc_mask;     /* allocation mask of slots in use */
   struct svga_qmem_alloc_entry *next;  /* next memory block of the same query type */
};
241
242
243 /**
244 * Allocate a memory block from the query object memory
245 * \return -1 if out of memory, else index of the query memory block
246 */
247 static int
248 allocate_query_block(struct svga_context *svga)
249 {
250 int index;
251 unsigned offset;
252
253 /* Find the next available query block */
254 index = util_bitmask_add(svga->gb_query_alloc_mask);
255
256 if (index == UTIL_BITMASK_INVALID_INDEX)
257 return -1;
258
259 offset = index * SVGA_QUERY_MEM_BLOCK_SIZE;
260 if (offset >= svga->gb_query_len) {
261 unsigned i;
262
263 /**
264 * All the memory blocks are allocated, lets see if there is
265 * any empty memory block around that can be freed up.
266 */
267 index = -1;
268 for (i = 0; i < SVGA_QUERY_MAX && index == -1; i++) {
269 struct svga_qmem_alloc_entry *alloc_entry;
270 struct svga_qmem_alloc_entry *prev_alloc_entry = NULL;
271
272 alloc_entry = svga->gb_query_map[i];
273 while (alloc_entry && index == -1) {
274 if (alloc_entry->nquery == 0) {
275 /* This memory block is empty, it can be recycled. */
276 if (prev_alloc_entry) {
277 prev_alloc_entry->next = alloc_entry->next;
278 } else {
279 svga->gb_query_map[i] = alloc_entry->next;
280 }
281 index = alloc_entry->block_index;
282 } else {
283 prev_alloc_entry = alloc_entry;
284 alloc_entry = alloc_entry->next;
285 }
286 }
287 }
288 }
289
290 return index;
291 }
292
293 /**
294 * Allocate a slot in the specified memory block.
295 * All slots in this memory block are of the same size.
296 *
297 * \return -1 if out of memory, else index of the query slot
298 */
299 static int
300 allocate_query_slot(struct svga_context *svga,
301 struct svga_qmem_alloc_entry *alloc)
302 {
303 int index;
304 unsigned offset;
305
306 /* Find the next available slot */
307 index = util_bitmask_add(alloc->alloc_mask);
308
309 if (index == UTIL_BITMASK_INVALID_INDEX)
310 return -1;
311
312 offset = index * alloc->query_size;
313 if (offset >= SVGA_QUERY_MEM_BLOCK_SIZE)
314 return -1;
315
316 alloc->nquery++;
317
318 return index;
319 }
320
321 /**
322 * Deallocate the specified slot in the memory block.
323 * If all slots are freed up, then deallocate the memory block
324 * as well, so it can be allocated for other query type
325 */
326 static void
327 deallocate_query_slot(struct svga_context *svga,
328 struct svga_qmem_alloc_entry *alloc,
329 unsigned index)
330 {
331 assert(index != UTIL_BITMASK_INVALID_INDEX);
332
333 util_bitmask_clear(alloc->alloc_mask, index);
334 alloc->nquery--;
335
336 /**
337 * Don't worry about deallocating the empty memory block here.
338 * The empty memory block will be recycled when no more memory block
339 * can be allocated.
340 */
341 }
342
343 static struct svga_qmem_alloc_entry *
344 allocate_query_block_entry(struct svga_context *svga,
345 unsigned len)
346 {
347 struct svga_qmem_alloc_entry *alloc_entry;
348 int block_index = -1;
349
350 block_index = allocate_query_block(svga);
351 if (block_index == -1)
352 return NULL;
353 alloc_entry = CALLOC_STRUCT(svga_qmem_alloc_entry);
354 if (!alloc_entry)
355 return NULL;
356
357 alloc_entry->block_index = block_index;
358 alloc_entry->start_offset = block_index * SVGA_QUERY_MEM_BLOCK_SIZE;
359 alloc_entry->nquery = 0;
360 alloc_entry->alloc_mask = util_bitmask_create();
361 alloc_entry->next = NULL;
362 alloc_entry->query_size = len;
363
364 return alloc_entry;
365 }
366
367 /**
368 * Allocate a memory slot for a query of the specified type.
369 * It will first search through the memory blocks that are allocated
370 * for the query type. If no memory slot is available, it will try
371 * to allocate another memory block within the query object memory for
372 * this query type.
373 */
374 static int
375 allocate_query(struct svga_context *svga,
376 SVGA3dQueryType type,
377 unsigned len)
378 {
379 struct svga_qmem_alloc_entry *alloc_entry;
380 int slot_index = -1;
381 unsigned offset;
382
383 assert(type < SVGA_QUERY_MAX);
384
385 alloc_entry = svga->gb_query_map[type];
386
387 if (!alloc_entry) {
388 /**
389 * No query memory block has been allocated for this query type,
390 * allocate one now
391 */
392 alloc_entry = allocate_query_block_entry(svga, len);
393 if (!alloc_entry)
394 return -1;
395 svga->gb_query_map[type] = alloc_entry;
396 }
397
398 /* Allocate a slot within the memory block allocated for this query type */
399 slot_index = allocate_query_slot(svga, alloc_entry);
400
401 if (slot_index == -1) {
402 /* This query memory block is full, allocate another one */
403 alloc_entry = allocate_query_block_entry(svga, len);
404 if (!alloc_entry)
405 return -1;
406 alloc_entry->next = svga->gb_query_map[type];
407 svga->gb_query_map[type] = alloc_entry;
408 slot_index = allocate_query_slot(svga, alloc_entry);
409 }
410
411 assert(slot_index != -1);
412 offset = slot_index * len + alloc_entry->start_offset;
413
414 return offset;
415 }
416
417
418 /**
419 * Deallocate memory slot allocated for the specified query
420 */
421 static void
422 deallocate_query(struct svga_context *svga,
423 struct svga_query *sq)
424 {
425 struct svga_qmem_alloc_entry *alloc_entry;
426 unsigned slot_index;
427 unsigned offset = sq->offset;
428
429 alloc_entry = svga->gb_query_map[sq->svga_type];
430
431 while (alloc_entry) {
432 if (offset >= alloc_entry->start_offset &&
433 offset < alloc_entry->start_offset + SVGA_QUERY_MEM_BLOCK_SIZE) {
434
435 /* The slot belongs to this memory block, deallocate it */
436 slot_index = (offset - alloc_entry->start_offset) /
437 alloc_entry->query_size;
438 deallocate_query_slot(svga, alloc_entry, slot_index);
439 alloc_entry = NULL;
440 } else {
441 alloc_entry = alloc_entry->next;
442 }
443 }
444 }
445
446
447 /**
448 * Destroy the gb query object and all the related query structures
449 */
450 static void
451 destroy_gb_query_obj(struct svga_context *svga)
452 {
453 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
454 unsigned i;
455
456 for (i = 0; i < SVGA_QUERY_MAX; i++) {
457 struct svga_qmem_alloc_entry *alloc_entry, *next;
458 alloc_entry = svga->gb_query_map[i];
459 while (alloc_entry) {
460 next = alloc_entry->next;
461 util_bitmask_destroy(alloc_entry->alloc_mask);
462 FREE(alloc_entry);
463 alloc_entry = next;
464 }
465 svga->gb_query_map[i] = NULL;
466 }
467
468 if (svga->gb_query)
469 sws->query_destroy(sws, svga->gb_query);
470 svga->gb_query = NULL;
471
472 util_bitmask_destroy(svga->gb_query_alloc_mask);
473 }
474
/**
 * Define query and create the gb query object if it is not already created.
 * There is only one gb query object per context which will be shared by
 * queries of all types.
 * \param resultLen  size of the device result structure for this query type
 * \return PIPE_OK, or PIPE_ERROR_OUT_OF_MEMORY on any failure
 */
static enum pipe_error
define_query_vgpu10(struct svga_context *svga,
                    struct svga_query *sq, int resultLen)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   int qlen;
   enum pipe_error ret = PIPE_OK;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);

   if (svga->gb_query == NULL) {
      /* Create a gb query object (lazily, on first query definition) */
      svga->gb_query = sws->query_create(sws, SVGA_QUERY_MEM_SIZE);
      if (!svga->gb_query)
         return PIPE_ERROR_OUT_OF_MEMORY;
      svga->gb_query_len = SVGA_QUERY_MEM_SIZE;
      memset (svga->gb_query_map, 0, sizeof(svga->gb_query_map));
      svga->gb_query_alloc_mask = util_bitmask_create();

      /* Bind the query object to the context; on a full command buffer,
       * flush and retry once.
       */
      if (svga->swc->query_bind(svga->swc, svga->gb_query,
                                SVGA_QUERY_FLAG_SET) != PIPE_OK) {
         svga_context_flush(svga, NULL);
         svga->swc->query_bind(svga->swc, svga->gb_query,
                               SVGA_QUERY_FLAG_SET);
      }
   }

   sq->gb_query = svga->gb_query;

   /* Allocate an integer ID for this query.
    * NOTE(review): the caller (svga_create_query) has already assigned
    * sq->id from the same bitmask; overwriting it here appears to leak
    * the previously allocated id bit -- confirm against svga_create_query.
    */
   sq->id = util_bitmask_add(svga->query_id_bm);
   if (sq->id == UTIL_BITMASK_INVALID_INDEX)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* Find a slot for this query in the gb object.
    * The slot holds the device query state followed by the result struct.
    */
   qlen = resultLen + sizeof(SVGA3dQueryState);
   sq->offset = allocate_query(svga, sq->svga_type, qlen);
   if (sq->offset == -1)
      return PIPE_ERROR_OUT_OF_MEMORY;

   SVGA_DBG(DEBUG_QUERY, "   query type=%d qid=0x%x offset=%d\n",
            sq->svga_type, sq->id, sq->offset);

   /**
    * Send SVGA3D commands to define the query.
    * Each command uses the flush-and-retry-once pattern for a full
    * command buffer.
    */
   ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
   }
   if (ret != PIPE_OK)
      return PIPE_ERROR_OUT_OF_MEMORY;

   ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
   }
   assert(ret == PIPE_OK);

   ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
   }
   assert(ret == PIPE_OK);

   return PIPE_OK;
}
551
552 static enum pipe_error
553 destroy_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
554 {
555 enum pipe_error ret;
556
557 ret = SVGA3D_vgpu10_DestroyQuery(svga->swc, sq->id);
558
559 /* Deallocate the memory slot allocated for this query */
560 deallocate_query(svga, sq);
561
562 return ret;
563 }
564
565
/**
 * Rebind the context's shared gb query object to the context
 * (the rebind.flags.query flag is set elsewhere when the binding is lost).
 */
static void
rebind_vgpu10_query(struct svga_context *svga)
{
   /* On a full command buffer, flush and retry once. */
   if (svga->swc->query_bind(svga->swc, svga->gb_query,
                             SVGA_QUERY_FLAG_REF) != PIPE_OK) {
      svga_context_flush(svga, NULL);
      svga->swc->query_bind(svga->swc, svga->gb_query,
                            SVGA_QUERY_FLAG_REF);
   }

   svga->rebind.flags.query = FALSE;
}
581
582
/**
 * Start a VGPU10 query: reset its state in the gb query mob to NEW,
 * drop the previous fence, and emit the BeginQuery command.
 * \return PIPE_OK on success, PIPE_ERROR if the query state could not be
 *         initialized in the mob.
 */
static enum pipe_error
begin_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret = PIPE_OK;
   int status = 0;

   sws->fence_reference(sws, &sq->fence, NULL);

   /* Initialize the query state to NEW */
   status = sws->query_init(sws, sq->gb_query, sq->offset, SVGA3D_QUERYSTATE_NEW);
   if (status)
      return PIPE_ERROR;

   /* Make sure the gb query object is bound to this context. */
   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   /* Send the BeginQuery command to the device; on a full command
    * buffer, flush and retry once.
    */
   ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
   }
   return ret;
}
609
/**
 * End a VGPU10 query: emit the EndQuery command and synchronously wait
 * on the query fence.
 */
static enum pipe_error
end_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret = PIPE_OK;

   /* Make sure the gb query object is bound to this context. */
   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   /* Emit the command; on a full command buffer, flush and retry once. */
   ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
   }

   /* Finish fence is copied here from get_query_result_vgpu10. This helps
    * with cases where svga_begin_query might be called again before
    * svga_get_query_result, such as GL_TIME_ELAPSED.
    */
   if (!sq->fence) {
      svga_context_flush(svga, &sq->fence);
   }
   sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);

   return ret;
}
637
/**
 * Read back the result of a VGPU10 query from the gb query mob.
 * \param result     destination buffer, resultLen bytes
 * \param resultLen  size of the device result structure for this query type
 * \return TRUE if the result is available, FALSE if !wait and the query
 *         is still pending.
 */
static boolean
get_query_result_vgpu10(struct svga_context *svga, struct svga_query *sq,
                        boolean wait, void *result, int resultLen)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   SVGA3dQueryState queryState;

   /* Make sure the gb query object is bound to this context. */
   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);

   if (queryState == SVGA3D_QUERYSTATE_PENDING) {
      if (!wait)
         return FALSE;
      /* Wait for the command buffer containing EndQuery to complete,
       * then re-read the state/result the host wrote.
       */
      sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
      sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);
   }

   assert(queryState == SVGA3D_QUERYSTATE_SUCCEEDED ||
          queryState == SVGA3D_QUERYSTATE_FAILED);

   return TRUE;
}
663
664 static struct pipe_query *
665 svga_create_query(struct pipe_context *pipe,
666 unsigned query_type,
667 unsigned index)
668 {
669 struct svga_context *svga = svga_context(pipe);
670 struct svga_query *sq;
671
672 assert(query_type < SVGA_QUERY_MAX);
673
674 sq = CALLOC_STRUCT(svga_query);
675 if (!sq)
676 goto fail;
677
678 /* Allocate an integer ID for the query */
679 sq->id = util_bitmask_add(svga->query_id_bm);
680 if (sq->id == UTIL_BITMASK_INVALID_INDEX)
681 goto fail;
682
683 SVGA_DBG(DEBUG_QUERY, "%s type=%d sq=0x%x id=%d\n", __FUNCTION__,
684 query_type, sq, sq->id);
685
686 switch (query_type) {
687 case PIPE_QUERY_OCCLUSION_COUNTER:
688 sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSION;
689 if (svga_have_vgpu10(svga)) {
690 define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionQueryResult));
691
692 /**
693 * In OpenGL, occlusion counter query can be used in conditional
694 * rendering; however, in DX10, only OCCLUSION_PREDICATE query can
695 * be used for predication. Hence, we need to create an occlusion
696 * predicate query along with the occlusion counter query. So when
697 * the occlusion counter query is used for predication, the associated
698 * query of occlusion predicate type will be used
699 * in the SetPredication command.
700 */
701 sq->predicate = svga_create_query(pipe, PIPE_QUERY_OCCLUSION_PREDICATE, index);
702
703 } else {
704 define_query_vgpu9(svga, sq);
705 }
706 break;
707 case PIPE_QUERY_OCCLUSION_PREDICATE:
708 assert(svga_have_vgpu10(svga));
709 sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE;
710 define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionPredicateQueryResult));
711 break;
712 case PIPE_QUERY_PRIMITIVES_GENERATED:
713 case PIPE_QUERY_PRIMITIVES_EMITTED:
714 case PIPE_QUERY_SO_STATISTICS:
715 assert(svga_have_vgpu10(svga));
716 sq->svga_type = SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS;
717 define_query_vgpu10(svga, sq,
718 sizeof(SVGADXStreamOutStatisticsQueryResult));
719 break;
720 case PIPE_QUERY_TIMESTAMP:
721 assert(svga_have_vgpu10(svga));
722 sq->svga_type = SVGA3D_QUERYTYPE_TIMESTAMP;
723 define_query_vgpu10(svga, sq,
724 sizeof(SVGADXTimestampQueryResult));
725 break;
726 case SVGA_QUERY_NUM_DRAW_CALLS:
727 case SVGA_QUERY_NUM_FALLBACKS:
728 case SVGA_QUERY_NUM_FLUSHES:
729 case SVGA_QUERY_NUM_VALIDATIONS:
730 case SVGA_QUERY_MAP_BUFFER_TIME:
731 case SVGA_QUERY_NUM_RESOURCES_MAPPED:
732 case SVGA_QUERY_NUM_BYTES_UPLOADED:
733 case SVGA_QUERY_COMMAND_BUFFER_SIZE:
734 case SVGA_QUERY_FLUSH_TIME:
735 case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
736 case SVGA_QUERY_MEMORY_USED:
737 case SVGA_QUERY_NUM_SHADERS:
738 case SVGA_QUERY_NUM_RESOURCES:
739 case SVGA_QUERY_NUM_STATE_OBJECTS:
740 case SVGA_QUERY_NUM_SURFACE_VIEWS:
741 case SVGA_QUERY_NUM_GENERATE_MIPMAP:
742 case SVGA_QUERY_NUM_READBACKS:
743 case SVGA_QUERY_NUM_RESOURCE_UPDATES:
744 case SVGA_QUERY_NUM_BUFFER_UPLOADS:
745 case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
746 case SVGA_QUERY_NUM_CONST_UPDATES:
747 break;
748 default:
749 assert(!"unexpected query type in svga_create_query()");
750 }
751
752 sq->type = query_type;
753
754 return &sq->base;
755
756 fail:
757 FREE(sq);
758 return NULL;
759 }
760
/**
 * Destroy a query.  As an internal convention, q == NULL means "destroy
 * the context's shared gb query object" (used at context teardown).
 */
static void
svga_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct svga_query *sq;

   if (!q) {
      destroy_gb_query_obj(svga);
      return;
   }

   sq = svga_query(q);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      if (svga_have_vgpu10(svga)) {
         /* make sure to also destroy any associated predicate query */
         if (sq->predicate)
            svga_destroy_query(pipe, sq->predicate);
         destroy_query_vgpu10(svga, sq);
      } else {
         /* VGPU9 queries only own the pinned result buffer */
         sws->buffer_destroy(sws, sq->hwbuf);
      }
      sws->fence_reference(sws, &sq->fence, NULL);
      break;
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      assert(svga_have_vgpu10(svga));
      destroy_query_vgpu10(svga, sq);
      sws->fence_reference(sws, &sq->fence, NULL);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      destroy_query_vgpu10(svga, sq);
      sws->fence_reference(sws, &sq->fence, NULL);
      break;
   case SVGA_QUERY_NUM_DRAW_CALLS:
   case SVGA_QUERY_NUM_FALLBACKS:
   case SVGA_QUERY_NUM_FLUSHES:
   case SVGA_QUERY_NUM_VALIDATIONS:
   case SVGA_QUERY_MAP_BUFFER_TIME:
   case SVGA_QUERY_NUM_RESOURCES_MAPPED:
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
   case SVGA_QUERY_FLUSH_TIME:
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
   case SVGA_QUERY_NUM_READBACKS:
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
   case SVGA_QUERY_NUM_CONST_UPDATES:
      /* nothing */
      break;
   default:
      assert(!"svga: unexpected query type in svga_destroy_query()");
   }

   /* Free the query id */
   util_bitmask_clear(svga->query_id_bm, sq->id);

   FREE(sq);
}
835
836
/**
 * Called by the gallium frontend to begin a query.
 * For GPU-backed queries this emits the appropriate Begin command(s);
 * for the SVGA_QUERY_x HUD counters it just snapshots the current
 * counter value into sq->begin_count.
 * \return true (this driver does not report begin-query failure)
 */
static boolean
svga_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq = svga_query(q);
   enum pipe_error ret;

   assert(sq);
   assert(sq->type < SVGA_QUERY_MAX);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   /* Need to flush out buffered drawing commands so that they don't
    * get counted in the query results.
    */
   svga_hwtnl_flush_retry(svga);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      if (svga_have_vgpu10(svga)) {
         ret = begin_query_vgpu10(svga, sq);
         /* also need to start the associated occlusion predicate query */
         if (sq->predicate) {
            enum pipe_error status;
            status = begin_query_vgpu10(svga, svga_query(sq->predicate));
            assert(status == PIPE_OK);
            (void) status;
         }
      } else {
         ret = begin_query_vgpu9(svga, sq);
      }
      assert(ret == PIPE_OK);
      (void) ret;
      break;
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      assert(svga_have_vgpu10(svga));
      ret = begin_query_vgpu10(svga, sq);
      assert(ret == PIPE_OK);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      ret = begin_query_vgpu10(svga, sq);
      assert(ret == PIPE_OK);
      break;
   /* HUD counters: record the value at begin time; the result is the
    * delta computed against end_count in svga_get_query_result().
    */
   case SVGA_QUERY_NUM_DRAW_CALLS:
      sq->begin_count = svga->hud.num_draw_calls;
      break;
   case SVGA_QUERY_NUM_FALLBACKS:
      sq->begin_count = svga->hud.num_fallbacks;
      break;
   case SVGA_QUERY_NUM_FLUSHES:
      sq->begin_count = svga->hud.num_flushes;
      break;
   case SVGA_QUERY_NUM_VALIDATIONS:
      sq->begin_count = svga->hud.num_validations;
      break;
   case SVGA_QUERY_MAP_BUFFER_TIME:
      sq->begin_count = svga->hud.map_buffer_time;
      break;
   case SVGA_QUERY_NUM_RESOURCES_MAPPED:
      sq->begin_count = svga->hud.num_resources_mapped;
      break;
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
      sq->begin_count = svga->hud.num_bytes_uploaded;
      break;
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
      sq->begin_count = svga->hud.command_buffer_size;
      break;
   case SVGA_QUERY_FLUSH_TIME:
      sq->begin_count = svga->hud.flush_time;
      break;
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
      sq->begin_count = svga->hud.surface_write_flushes;
      break;
   case SVGA_QUERY_NUM_READBACKS:
      sq->begin_count = svga->hud.num_readbacks;
      break;
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
      sq->begin_count = svga->hud.num_resource_updates;
      break;
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
      sq->begin_count = svga->hud.num_buffer_uploads;
      break;
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
      sq->begin_count = svga->hud.num_const_buf_updates;
      break;
   case SVGA_QUERY_NUM_CONST_UPDATES:
      sq->begin_count = svga->hud.num_const_updates;
      break;
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
      /* nothing */
      break;
   default:
      assert(!"unexpected query type in svga_begin_query()");
   }

   /* Remember the active query of this type (checked in svga_end_query) */
   svga->sq[sq->type] = sq;

   return true;
}
946
947
/**
 * Called by the gallium frontend to end a query.
 * For GPU-backed queries this emits the End command(s); for the
 * SVGA_QUERY_x HUD counters it snapshots the current counter into
 * sq->end_count.
 * \return true (this driver never fails to end a query)
 */
static bool
svga_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq = svga_query(q);
   enum pipe_error ret;

   assert(sq);
   assert(sq->type < SVGA_QUERY_MAX);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   /* Timestamp queries have no explicit begin in gallium; implicitly
    * begin them here if they are not already active.
    */
   if (sq->type == PIPE_QUERY_TIMESTAMP && svga->sq[sq->type] != sq)
      svga_begin_query(pipe, q);

   /* Flush buffered drawing so it is counted by the query. */
   svga_hwtnl_flush_retry(svga);

   assert(svga->sq[sq->type] == sq);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      if (svga_have_vgpu10(svga)) {
         ret = end_query_vgpu10(svga, sq);
         /* also need to end the associated occlusion predicate query */
         if (sq->predicate) {
            enum pipe_error status;
            status = end_query_vgpu10(svga, svga_query(sq->predicate));
            assert(status == PIPE_OK);
            (void) status;
         }
      } else {
         ret = end_query_vgpu9(svga, sq);
      }
      assert(ret == PIPE_OK);
      (void) ret;
      /* TODO: Delay flushing. We don't really need to flush here, just ensure
       * that there is one flush before svga_get_query_result attempts to get
       * the result.
       */
      svga_context_flush(svga, NULL);
      break;
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      assert(svga_have_vgpu10(svga));
      ret = end_query_vgpu10(svga, sq);
      assert(ret == PIPE_OK);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      ret = end_query_vgpu10(svga, sq);
      assert(ret == PIPE_OK);
      break;
   /* HUD counters: record the value at end time to pair with begin_count */
   case SVGA_QUERY_NUM_DRAW_CALLS:
      sq->end_count = svga->hud.num_draw_calls;
      break;
   case SVGA_QUERY_NUM_FALLBACKS:
      sq->end_count = svga->hud.num_fallbacks;
      break;
   case SVGA_QUERY_NUM_FLUSHES:
      sq->end_count = svga->hud.num_flushes;
      break;
   case SVGA_QUERY_NUM_VALIDATIONS:
      sq->end_count = svga->hud.num_validations;
      break;
   case SVGA_QUERY_MAP_BUFFER_TIME:
      sq->end_count = svga->hud.map_buffer_time;
      break;
   case SVGA_QUERY_NUM_RESOURCES_MAPPED:
      sq->end_count = svga->hud.num_resources_mapped;
      break;
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
      sq->end_count = svga->hud.num_bytes_uploaded;
      break;
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
      sq->end_count = svga->hud.command_buffer_size;
      break;
   case SVGA_QUERY_FLUSH_TIME:
      sq->end_count = svga->hud.flush_time;
      break;
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
      sq->end_count = svga->hud.surface_write_flushes;
      break;
   case SVGA_QUERY_NUM_READBACKS:
      sq->end_count = svga->hud.num_readbacks;
      break;
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
      sq->end_count = svga->hud.num_resource_updates;
      break;
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
      sq->end_count = svga->hud.num_buffer_uploads;
      break;
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
      sq->end_count = svga->hud.num_const_buf_updates;
      break;
   case SVGA_QUERY_NUM_CONST_UPDATES:
      sq->end_count = svga->hud.num_const_updates;
      break;
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
      /* nothing */
      break;
   default:
      assert(!"unexpected query type in svga_end_query()");
   }
   svga->sq[sq->type] = NULL;
   return true;
}
1062
1063
1064 static boolean
1065 svga_get_query_result(struct pipe_context *pipe,
1066 struct pipe_query *q,
1067 boolean wait,
1068 union pipe_query_result *vresult)
1069 {
1070 struct svga_screen *svgascreen = svga_screen(pipe->screen);
1071 struct svga_context *svga = svga_context(pipe);
1072 struct svga_query *sq = svga_query(q);
1073 uint64_t *result = (uint64_t *)vresult;
1074 boolean ret = TRUE;
1075
1076 assert(sq);
1077
1078 SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d wait: %d\n",
1079 __FUNCTION__, sq, sq->id, wait);
1080
1081 switch (sq->type) {
1082 case PIPE_QUERY_OCCLUSION_COUNTER:
1083 if (svga_have_vgpu10(svga)) {
1084 SVGADXOcclusionQueryResult occResult;
1085 ret = get_query_result_vgpu10(svga, sq, wait,
1086 (void *)&occResult, sizeof(occResult));
1087 *result = (uint64_t)occResult.samplesRendered;
1088 } else {
1089 ret = get_query_result_vgpu9(svga, sq, wait, (uint64_t *)result);
1090 }
1091 break;
1092 case PIPE_QUERY_OCCLUSION_PREDICATE: {
1093 SVGADXOcclusionPredicateQueryResult occResult;
1094 assert(svga_have_vgpu10(svga));
1095 ret = get_query_result_vgpu10(svga, sq, wait,
1096 (void *)&occResult, sizeof(occResult));
1097 vresult->b = occResult.anySamplesRendered != 0;
1098 break;
1099 }
1100 case PIPE_QUERY_SO_STATISTICS: {
1101 SVGADXStreamOutStatisticsQueryResult sResult;
1102 struct pipe_query_data_so_statistics *pResult =
1103 (struct pipe_query_data_so_statistics *)vresult;
1104
1105 assert(svga_have_vgpu10(svga));
1106 ret = get_query_result_vgpu10(svga, sq, wait,
1107 (void *)&sResult, sizeof(sResult));
1108 pResult->num_primitives_written = sResult.numPrimitivesWritten;
1109 pResult->primitives_storage_needed = sResult.numPrimitivesRequired;
1110 break;
1111 }
1112 case PIPE_QUERY_TIMESTAMP: {
1113 SVGADXTimestampQueryResult sResult;
1114
1115 assert(svga_have_vgpu10(svga));
1116 ret = get_query_result_vgpu10(svga, sq, wait,
1117 (void *)&sResult, sizeof(sResult));
1118 *result = (uint64_t)sResult.timestamp;
1119 break;
1120 }
1121 case PIPE_QUERY_PRIMITIVES_GENERATED: {
1122 SVGADXStreamOutStatisticsQueryResult sResult;
1123
1124 assert(svga_have_vgpu10(svga));
1125 ret = get_query_result_vgpu10(svga, sq, wait,
1126 (void *)&sResult, sizeof sResult);
1127 *result = (uint64_t)sResult.numPrimitivesRequired;
1128 break;
1129 }
1130 case PIPE_QUERY_PRIMITIVES_EMITTED: {
1131 SVGADXStreamOutStatisticsQueryResult sResult;
1132
1133 assert(svga_have_vgpu10(svga));
1134 ret = get_query_result_vgpu10(svga, sq, wait,
1135 (void *)&sResult, sizeof sResult);
1136 *result = (uint64_t)sResult.numPrimitivesWritten;
1137 break;
1138 }
1139 /* These are per-frame counters */
1140 case SVGA_QUERY_NUM_DRAW_CALLS:
1141 case SVGA_QUERY_NUM_FALLBACKS:
1142 case SVGA_QUERY_NUM_FLUSHES:
1143 case SVGA_QUERY_NUM_VALIDATIONS:
1144 case SVGA_QUERY_MAP_BUFFER_TIME:
1145 case SVGA_QUERY_NUM_RESOURCES_MAPPED:
1146 case SVGA_QUERY_NUM_BYTES_UPLOADED:
1147 case SVGA_QUERY_COMMAND_BUFFER_SIZE:
1148 case SVGA_QUERY_FLUSH_TIME:
1149 case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
1150 case SVGA_QUERY_NUM_READBACKS:
1151 case SVGA_QUERY_NUM_RESOURCE_UPDATES:
1152 case SVGA_QUERY_NUM_BUFFER_UPLOADS:
1153 case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
1154 case SVGA_QUERY_NUM_CONST_UPDATES:
1155 vresult->u64 = sq->end_count - sq->begin_count;
1156 break;
1157 /* These are running total counters */
1158 case SVGA_QUERY_MEMORY_USED:
1159 vresult->u64 = svgascreen->hud.total_resource_bytes;
1160 break;
1161 case SVGA_QUERY_NUM_SHADERS:
1162 vresult->u64 = svga->hud.num_shaders;
1163 break;
1164 case SVGA_QUERY_NUM_RESOURCES:
1165 vresult->u64 = svgascreen->hud.num_resources;
1166 break;
1167 case SVGA_QUERY_NUM_STATE_OBJECTS:
1168 vresult->u64 = svga->hud.num_state_objects;
1169 break;
1170 case SVGA_QUERY_NUM_SURFACE_VIEWS:
1171 vresult->u64 = svga->hud.num_surface_views;
1172 break;
1173 case SVGA_QUERY_NUM_GENERATE_MIPMAP:
1174 vresult->u64 = svga->hud.num_generate_mipmap;
1175 break;
1176 default:
1177 assert(!"unexpected query type in svga_get_query_result");
1178 }
1179
1180 SVGA_DBG(DEBUG_QUERY, "%s result %d\n", __FUNCTION__, *((uint64_t *)vresult));
1181
1182 return ret;
1183 }
1184
1185 static void
1186 svga_render_condition(struct pipe_context *pipe, struct pipe_query *q,
1187 boolean condition, uint mode)
1188 {
1189 struct svga_context *svga = svga_context(pipe);
1190 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
1191 struct svga_query *sq = svga_query(q);
1192 SVGA3dQueryId queryId;
1193 enum pipe_error ret;
1194
1195 SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);
1196
1197 assert(svga_have_vgpu10(svga));
1198 if (sq == NULL) {
1199 queryId = SVGA3D_INVALID_ID;
1200 }
1201 else {
1202 assert(sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION ||
1203 sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE);
1204
1205 if (sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION) {
1206 assert(sq->predicate);
1207 /**
1208 * For conditional rendering, make sure to use the associated
1209 * predicate query.
1210 */
1211 sq = svga_query(sq->predicate);
1212 }
1213 queryId = sq->id;
1214
1215 if ((mode == PIPE_RENDER_COND_WAIT ||
1216 mode == PIPE_RENDER_COND_BY_REGION_WAIT) && sq->fence) {
1217 sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
1218 }
1219 }
1220
1221 ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
1222 (uint32) condition);
1223 if (ret != PIPE_OK) {
1224 svga_context_flush(svga, NULL);
1225 ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
1226 (uint32) condition);
1227 }
1228 }
1229
1230
1231 /*
1232 * This function is a workaround because we lack the ability to query
1233 * renderer's time synchornously.
1234 */
1235 static uint64_t
1236 svga_get_timestamp(struct pipe_context *pipe)
1237 {
1238 struct pipe_query *q = svga_create_query(pipe, PIPE_QUERY_TIMESTAMP, 0);
1239 union pipe_query_result result;
1240
1241 svga_begin_query(pipe, q);
1242 svga_end_query(pipe,q);
1243 svga_get_query_result(pipe, q, TRUE, &result);
1244 svga_destroy_query(pipe, q);
1245
1246 return result.u64;
1247 }
1248
1249
/**
 * Called via pipe_context::set_active_query_state when queries are
 * paused/resumed.  The svga driver needs no work here, so this is an
 * intentional no-op; it exists only so the hook installed in
 * svga_init_query_functions() is non-NULL.
 */
static void
svga_set_active_query_state(struct pipe_context *pipe, boolean enable)
{
}
1254
1255
1256 void
1257 svga_init_query_functions(struct svga_context *svga)
1258 {
1259 svga->pipe.create_query = svga_create_query;
1260 svga->pipe.destroy_query = svga_destroy_query;
1261 svga->pipe.begin_query = svga_begin_query;
1262 svga->pipe.end_query = svga_end_query;
1263 svga->pipe.get_query_result = svga_get_query_result;
1264 svga->pipe.set_active_query_state = svga_set_active_query_state;
1265 svga->pipe.render_condition = svga_render_condition;
1266 svga->pipe.get_timestamp = svga_get_timestamp;
1267 }