svga: don't call os_get_time() when not needed by Gallium HUD
[mesa.git] / src / gallium / drivers / svga / svga_pipe_query.c
1 /**********************************************************
2 * Copyright 2008-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 #include "pipe/p_state.h"
27 #include "pipe/p_context.h"
28
29 #include "util/u_bitmask.h"
30 #include "util/u_memory.h"
31
32 #include "svga_cmd.h"
33 #include "svga_context.h"
34 #include "svga_screen.h"
35 #include "svga_resource_buffer.h"
36 #include "svga_winsys.h"
37 #include "svga_debug.h"
38
39
/* Fixme: want a public base class for all pipe structs, even if there
 * isn't much in them.
 */
struct pipe_query {
   int dummy;   /* placeholder member; gallium treats pipe_query as opaque */
};
46
/**
 * An SVGA query object.  Wraps the opaque pipe_query and carries both the
 * device-backed query state (VGPU9 buffer or VGPU10 gb slot) and the
 * begin/end counter snapshots used by the software-only SVGA_QUERY_x types.
 */
struct svga_query {
   struct pipe_query base;
   unsigned type;                  /**< PIPE_QUERY_x or SVGA_QUERY_x */
   SVGA3dQueryType svga_type;      /**< SVGA3D_QUERYTYPE_x or unused */

   unsigned id;                    /** Per-context query identifier */

   /* Fence from the context flush that submitted this query's commands */
   struct pipe_fence_handle *fence;

   /** For PIPE_QUERY_OCCLUSION_COUNTER / SVGA3D_QUERYTYPE_OCCLUSION */

   /* For VGPU9 */
   struct svga_winsys_buffer *hwbuf;    /* pinned buffer holding the result */
   /* Host writes the query state/result here, hence volatile */
   volatile SVGA3dQueryResult *queryResult;

   /** For VGPU10 */
   struct svga_winsys_gb_query *gb_query;  /* shared per-context query mob */
   SVGA3dDXQueryFlags flags;
   unsigned offset;                /**< offset to the gb_query memory */
   struct pipe_query *predicate;   /** The associated query that can be used for predicate */

   /** For non-GPU SVGA_QUERY_x queries */
   uint64_t begin_count, end_count;
};
71
72
/** Downcast an opaque pipe_query pointer to the svga-specific wrapper. */
static inline struct svga_query *
svga_query(struct pipe_query *q)
{
   struct svga_query *sq = (struct svga_query *) q;
   return sq;
}
79
/**
 * VGPU9
 */

/* Forward declaration: begin_query_vgpu9() must be able to wait for a
 * previous pending result through the public entry point defined below.
 */
static boolean
svga_get_query_result(struct pipe_context *pipe,
                      struct pipe_query *q,
                      boolean wait,
                      union pipe_query_result *result);
89
/**
 * Create the VGPU9 host-visible result buffer for a query.
 *
 * The buffer is created pinned, mapped once to initialize the result
 * header, then unmapped (the pinned mapping lets us poll state later
 * without fence waits — see comment below).
 *
 * \return PIPE_OK on success, PIPE_ERROR_OUT_OF_MEMORY on failure.
 */
static enum pipe_error
define_query_vgpu9(struct svga_context *svga,
                   struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;

   sq->hwbuf = svga_winsys_buffer_create(svga, 1,
                                         SVGA_BUFFER_USAGE_PINNED,
                                         sizeof *sq->queryResult);
   if (!sq->hwbuf)
      return PIPE_ERROR_OUT_OF_MEMORY;

   sq->queryResult = (SVGA3dQueryResult *)
                     sws->buffer_map(sws, sq->hwbuf, PIPE_TRANSFER_WRITE);
   if (!sq->queryResult) {
      /* Mapping failed: release the buffer we just created */
      sws->buffer_destroy(sws, sq->hwbuf);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   sq->queryResult->totalSize = sizeof *sq->queryResult;
   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;

   /* We request the buffer to be pinned and assume it is always mapped.
    * The reason is that we don't want to wait for fences when checking the
    * query status.
    */
   sws->buffer_unmap(sws, sq->hwbuf);

   return PIPE_OK;
}
120
/**
 * Begin a VGPU9 query: reset the result buffer to NEW, drop any old
 * fence, and emit the BeginQuery command (with one flush-and-retry if
 * the command buffer is full).
 */
static enum pipe_error
begin_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret = PIPE_OK;

   if (sq->queryResult->state == SVGA3D_QUERYSTATE_PENDING) {
      /* The application doesn't care for the pending query result.
       * We cannot let go of the existing buffer and just get a new one
       * because its storage may be reused for other purposes and clobbered
       * by the host when it determines the query result. So the only
       * option here is to wait for the existing query's result -- not a
       * big deal, given that no sane application would do this.
       */
      uint64_t result;
      svga_get_query_result(&svga->pipe, &sq->base, TRUE, (void*)&result);
      assert(sq->queryResult->state != SVGA3D_QUERYSTATE_PENDING);
   }

   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
   sws->fence_reference(sws, &sq->fence, NULL);

   ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
   if (ret != PIPE_OK) {
      /* Command buffer was full: flush and retry once */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
   }
   return ret;
}
150
151 static enum pipe_error
152 end_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
153 {
154 enum pipe_error ret = PIPE_OK;
155
156 /* Set to PENDING before sending EndQuery. */
157 sq->queryResult->state = SVGA3D_QUERYSTATE_PENDING;
158
159 ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
160 if (ret != PIPE_OK) {
161 svga_context_flush(svga, NULL);
162 ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
163 }
164 return ret;
165 }
166
/**
 * Fetch the result of a VGPU9 query.
 *
 * \param wait    if TRUE, block until the host produces the result
 * \param result  receives the 32-bit query result, zero-extended
 * \return TRUE if the result is available, FALSE when !wait and pending
 */
static boolean
get_query_result_vgpu9(struct svga_context *svga, struct svga_query *sq,
                       boolean wait, uint64_t *result)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret;
   SVGA3dQueryState state;

   if (!sq->fence) {
      /* The query status won't be updated by the host unless
       * SVGA_3D_CMD_WAIT_FOR_QUERY is emitted. Unfortunately this will cause
       * a synchronous wait on the host.
       */
      ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
      if (ret != PIPE_OK) {
         /* Command buffer was full: flush and retry once */
         svga_context_flush(svga, NULL);
         ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
      }
      assert (ret == PIPE_OK);
      /* Flush so the WaitForQuery reaches the host and grab a fence */
      svga_context_flush(svga, &sq->fence);
      assert(sq->fence);
   }

   state = sq->queryResult->state;
   if (state == SVGA3D_QUERYSTATE_PENDING) {
      if (!wait)
         return FALSE;
      /* Block on the fence, then re-read the host-updated state */
      sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
      state = sq->queryResult->state;
   }

   assert(state == SVGA3D_QUERYSTATE_SUCCEEDED ||
          state == SVGA3D_QUERYSTATE_FAILED);

   *result = (uint64_t)sq->queryResult->result32;
   return TRUE;
}
204
205
206 /**
207 * VGPU10
208 *
209 * There is one query mob allocated for each context to be shared by all
 * query types. The mob is used to hold queries' state and results. Since
211 * each query result type is of different length, to ease the query allocation
212 * management, the mob is divided into memory blocks. Each memory block
213 * will hold queries of the same type. Multiple memory blocks can be allocated
214 * for a particular query type.
215 *
216 * Currently each memory block is of 184 bytes. We support up to 128
217 * memory blocks. The query memory size is arbitrary right now.
 * Each occlusion query takes about 8 bytes. One memory block can accommodate
219 * 23 occlusion queries. 128 of those blocks can support up to 2944 occlusion
220 * queries. That seems reasonable for now. If we think this limit is
221 * not enough, we can increase the limit or try to grow the mob in runtime.
222 * Note, SVGA device does not impose one mob per context for queries,
223 * we could allocate multiple mobs for queries; however, wddm KMD does not
224 * currently support that.
225 *
226 * Also note that the GL guest driver does not issue any of the
227 * following commands: DXMoveQuery, DXBindAllQuery & DXReadbackAllQuery.
228 */
229 #define SVGA_QUERY_MEM_BLOCK_SIZE (sizeof(SVGADXQueryResultUnion) * 2)
230 #define SVGA_QUERY_MEM_SIZE (128 * SVGA_QUERY_MEM_BLOCK_SIZE)
231
/**
 * Bookkeeping for one fixed-size block of the shared query mob.
 * All slots in a block hold queries of the same type; additional blocks
 * for the same type are chained through 'next'.
 */
struct svga_qmem_alloc_entry
{
   unsigned start_offset;               /* start offset of the memory block */
   unsigned block_index;                /* block index of the memory block */
   unsigned query_size;                 /* query size in this memory block */
   unsigned nquery;                     /* number of queries allocated */
   struct util_bitmask *alloc_mask;     /* allocation mask */
   struct svga_qmem_alloc_entry *next;  /* next memory block */
};
241
242
243 /**
244 * Allocate a memory block from the query object memory
245 * \return -1 if out of memory, else index of the query memory block
246 */
247 static int
248 allocate_query_block(struct svga_context *svga)
249 {
250 int index;
251 unsigned offset;
252
253 /* Find the next available query block */
254 index = util_bitmask_add(svga->gb_query_alloc_mask);
255
256 if (index == UTIL_BITMASK_INVALID_INDEX)
257 return -1;
258
259 offset = index * SVGA_QUERY_MEM_BLOCK_SIZE;
260 if (offset >= svga->gb_query_len) {
261 unsigned i;
262
263 /**
264 * All the memory blocks are allocated, lets see if there is
265 * any empty memory block around that can be freed up.
266 */
267 index = -1;
268 for (i = 0; i < SVGA_QUERY_MAX && index == -1; i++) {
269 struct svga_qmem_alloc_entry *alloc_entry;
270 struct svga_qmem_alloc_entry *prev_alloc_entry = NULL;
271
272 alloc_entry = svga->gb_query_map[i];
273 while (alloc_entry && index == -1) {
274 if (alloc_entry->nquery == 0) {
275 /* This memory block is empty, it can be recycled. */
276 if (prev_alloc_entry) {
277 prev_alloc_entry->next = alloc_entry->next;
278 } else {
279 svga->gb_query_map[i] = alloc_entry->next;
280 }
281 index = alloc_entry->block_index;
282 } else {
283 prev_alloc_entry = alloc_entry;
284 alloc_entry = alloc_entry->next;
285 }
286 }
287 }
288 }
289
290 return index;
291 }
292
293 /**
294 * Allocate a slot in the specified memory block.
295 * All slots in this memory block are of the same size.
296 *
297 * \return -1 if out of memory, else index of the query slot
298 */
299 static int
300 allocate_query_slot(struct svga_context *svga,
301 struct svga_qmem_alloc_entry *alloc)
302 {
303 int index;
304 unsigned offset;
305
306 /* Find the next available slot */
307 index = util_bitmask_add(alloc->alloc_mask);
308
309 if (index == UTIL_BITMASK_INVALID_INDEX)
310 return -1;
311
312 offset = index * alloc->query_size;
313 if (offset >= SVGA_QUERY_MEM_BLOCK_SIZE)
314 return -1;
315
316 alloc->nquery++;
317
318 return index;
319 }
320
/**
 * Deallocate the specified slot in the memory block.
 * If all slots are freed up, then deallocate the memory block
 * as well, so it can be allocated for other query type
 */
static void
deallocate_query_slot(struct svga_context *svga,
                      struct svga_qmem_alloc_entry *alloc,
                      unsigned index)
{
   assert(index != UTIL_BITMASK_INVALID_INDEX);

   util_bitmask_clear(alloc->alloc_mask, index);
   alloc->nquery--;

   /**
    * Don't worry about deallocating the empty memory block here.
    * The empty memory block will be recycled when no more memory block
    * can be allocated.
    */
}
342
343 static struct svga_qmem_alloc_entry *
344 allocate_query_block_entry(struct svga_context *svga,
345 unsigned len)
346 {
347 struct svga_qmem_alloc_entry *alloc_entry;
348 int block_index = -1;
349
350 block_index = allocate_query_block(svga);
351 if (block_index == -1)
352 return NULL;
353 alloc_entry = CALLOC_STRUCT(svga_qmem_alloc_entry);
354 if (!alloc_entry)
355 return NULL;
356
357 alloc_entry->block_index = block_index;
358 alloc_entry->start_offset = block_index * SVGA_QUERY_MEM_BLOCK_SIZE;
359 alloc_entry->nquery = 0;
360 alloc_entry->alloc_mask = util_bitmask_create();
361 alloc_entry->next = NULL;
362 alloc_entry->query_size = len;
363
364 return alloc_entry;
365 }
366
/**
 * Allocate a memory slot for a query of the specified type.
 * It will first search through the memory blocks that are allocated
 * for the query type. If no memory slot is available, it will try
 * to allocate another memory block within the query object memory for
 * this query type.
 *
 * \param len  size in bytes of one query (state + result) of this type
 * \return byte offset of the slot within the gb query mob, or -1 on failure
 */
static int
allocate_query(struct svga_context *svga,
               SVGA3dQueryType type,
               unsigned len)
{
   struct svga_qmem_alloc_entry *alloc_entry;
   int slot_index = -1;
   unsigned offset;

   assert(type < SVGA_QUERY_MAX);

   alloc_entry = svga->gb_query_map[type];

   if (!alloc_entry) {
      /**
       * No query memory block has been allocated for this query type,
       * allocate one now
       */
      alloc_entry = allocate_query_block_entry(svga, len);
      if (!alloc_entry)
         return -1;
      svga->gb_query_map[type] = alloc_entry;
   }

   /* Allocate a slot within the memory block allocated for this query type */
   slot_index = allocate_query_slot(svga, alloc_entry);

   if (slot_index == -1) {
      /* This query memory block is full, allocate another one */
      alloc_entry = allocate_query_block_entry(svga, len);
      if (!alloc_entry)
         return -1;
      /* Prepend the new block to this type's list */
      alloc_entry->next = svga->gb_query_map[type];
      svga->gb_query_map[type] = alloc_entry;
      slot_index = allocate_query_slot(svga, alloc_entry);
   }

   assert(slot_index != -1);
   offset = slot_index * len + alloc_entry->start_offset;

   return offset;
}
416
417
418 /**
419 * Deallocate memory slot allocated for the specified query
420 */
421 static void
422 deallocate_query(struct svga_context *svga,
423 struct svga_query *sq)
424 {
425 struct svga_qmem_alloc_entry *alloc_entry;
426 unsigned slot_index;
427 unsigned offset = sq->offset;
428
429 alloc_entry = svga->gb_query_map[sq->svga_type];
430
431 while (alloc_entry) {
432 if (offset >= alloc_entry->start_offset &&
433 offset < alloc_entry->start_offset + SVGA_QUERY_MEM_BLOCK_SIZE) {
434
435 /* The slot belongs to this memory block, deallocate it */
436 slot_index = (offset - alloc_entry->start_offset) /
437 alloc_entry->query_size;
438 deallocate_query_slot(svga, alloc_entry, slot_index);
439 alloc_entry = NULL;
440 } else {
441 alloc_entry = alloc_entry->next;
442 }
443 }
444 }
445
446
447 /**
448 * Destroy the gb query object and all the related query structures
449 */
450 static void
451 destroy_gb_query_obj(struct svga_context *svga)
452 {
453 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
454 unsigned i;
455
456 for (i = 0; i < SVGA_QUERY_MAX; i++) {
457 struct svga_qmem_alloc_entry *alloc_entry, *next;
458 alloc_entry = svga->gb_query_map[i];
459 while (alloc_entry) {
460 next = alloc_entry->next;
461 util_bitmask_destroy(alloc_entry->alloc_mask);
462 FREE(alloc_entry);
463 alloc_entry = next;
464 }
465 svga->gb_query_map[i] = NULL;
466 }
467
468 if (svga->gb_query)
469 sws->query_destroy(sws, svga->gb_query);
470 svga->gb_query = NULL;
471
472 util_bitmask_destroy(svga->gb_query_alloc_mask);
473 }
474
/**
 * Define query and create the gb query object if it is not already created.
 * There is only one gb query object per context which will be shared by
 * queries of all types.
 *
 * \param resultLen  size in bytes of this query type's result structure
 * \return PIPE_OK on success, PIPE_ERROR_OUT_OF_MEMORY on failure
 */
static enum pipe_error
define_query_vgpu10(struct svga_context *svga,
                    struct svga_query *sq, int resultLen)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   int qlen;
   enum pipe_error ret = PIPE_OK;

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);

   if (svga->gb_query == NULL) {
      /* Create a gb query object */
      svga->gb_query = sws->query_create(sws, SVGA_QUERY_MEM_SIZE);
      if (!svga->gb_query)
         return PIPE_ERROR_OUT_OF_MEMORY;
      svga->gb_query_len = SVGA_QUERY_MEM_SIZE;
      memset (svga->gb_query_map, 0, sizeof(svga->gb_query_map));
      svga->gb_query_alloc_mask = util_bitmask_create();

      /* Bind the query object to the context */
      if (svga->swc->query_bind(svga->swc, svga->gb_query,
                                SVGA_QUERY_FLAG_SET) != PIPE_OK) {
         /* Command buffer was full: flush and retry the bind once */
         svga_context_flush(svga, NULL);
         svga->swc->query_bind(svga->swc, svga->gb_query,
                               SVGA_QUERY_FLAG_SET);
      }
   }

   sq->gb_query = svga->gb_query;

   /* Allocate an integer ID for this query */
   sq->id = util_bitmask_add(svga->query_id_bm);
   if (sq->id == UTIL_BITMASK_INVALID_INDEX)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* Find a slot for this query in the gb object.  Each slot holds the
    * query state followed by the result structure.
    */
   qlen = resultLen + sizeof(SVGA3dQueryState);
   sq->offset = allocate_query(svga, sq->svga_type, qlen);
   if (sq->offset == -1)
      return PIPE_ERROR_OUT_OF_MEMORY;

   SVGA_DBG(DEBUG_QUERY, "   query type=%d qid=0x%x offset=%d\n",
            sq->svga_type, sq->id, sq->offset);

   /**
    * Send SVGA3D commands to define the query
    */
   ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
   if (ret != PIPE_OK) {
      /* Command buffer was full: flush and retry once */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
   }
   if (ret != PIPE_OK)
      return PIPE_ERROR_OUT_OF_MEMORY;

   ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
   }
   assert(ret == PIPE_OK);

   ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
   }
   assert(ret == PIPE_OK);

   return PIPE_OK;
}
551
552 static enum pipe_error
553 destroy_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
554 {
555 enum pipe_error ret;
556
557 ret = SVGA3D_vgpu10_DestroyQuery(svga->swc, sq->id);
558
559 /* Deallocate the memory slot allocated for this query */
560 deallocate_query(svga, sq);
561
562 return ret;
563 }
564
565
566 /**
567 * Rebind queryies to the context.
568 */
569 static void
570 rebind_vgpu10_query(struct svga_context *svga)
571 {
572 if (svga->swc->query_bind(svga->swc, svga->gb_query,
573 SVGA_QUERY_FLAG_REF) != PIPE_OK) {
574 svga_context_flush(svga, NULL);
575 svga->swc->query_bind(svga->swc, svga->gb_query,
576 SVGA_QUERY_FLAG_REF);
577 }
578
579 svga->rebind.flags.query = FALSE;
580 }
581
582
/**
 * Begin a VGPU10 query: drop any stale fence, reset the query's state in
 * the gb mob to NEW, rebind the query object if needed, and send the
 * BeginQuery command.
 */
static enum pipe_error
begin_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret = PIPE_OK;
   int status = 0;

   sws->fence_reference(sws, &sq->fence, NULL);

   /* Initialize the query state to NEW */
   status = sws->query_init(sws, sq->gb_query, sq->offset, SVGA3D_QUERYSTATE_NEW);
   if (status)
      return PIPE_ERROR;

   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   /* Send the BeginQuery command to the device */
   ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
   if (ret != PIPE_OK) {
      /* Command buffer was full: flush and retry once */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
   }
   return ret;
}
609
/**
 * End a VGPU10 query: send the EndQuery command, then flush and wait on
 * the fence so a subsequent begin cannot race the still-pending result.
 */
static enum pipe_error
end_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret = PIPE_OK;

   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
   if (ret != PIPE_OK) {
      /* Command buffer was full: flush and retry once */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
   }

   /* Finish fence is copied here from get_query_result_vgpu10. This helps
    * with cases where svga_begin_query might be called again before
    * svga_get_query_result, such as GL_TIME_ELAPSED.
    */
   if (!sq->fence) {
      svga_context_flush(svga, &sq->fence);
   }
   sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);

   return ret;
}
637
/**
 * Fetch the result of a VGPU10 query from the gb object.
 *
 * \param wait       if TRUE, block on the query's fence until ready
 * \param result     receives resultLen bytes of the query result
 * \return TRUE if the result is available, FALSE when !wait and pending
 */
static boolean
get_query_result_vgpu10(struct svga_context *svga, struct svga_query *sq,
                        boolean wait, void *result, int resultLen)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   SVGA3dQueryState queryState;

   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);

   if (queryState == SVGA3D_QUERYSTATE_PENDING) {
      if (!wait)
         return FALSE;
      /* Wait for the host, then re-read the state and result */
      sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
      sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);
   }

   assert(queryState == SVGA3D_QUERYSTATE_SUCCEEDED ||
          queryState == SVGA3D_QUERYSTATE_FAILED);

   return TRUE;
}
663
664 static struct pipe_query *
665 svga_create_query(struct pipe_context *pipe,
666 unsigned query_type,
667 unsigned index)
668 {
669 struct svga_context *svga = svga_context(pipe);
670 struct svga_query *sq;
671
672 assert(query_type < SVGA_QUERY_MAX);
673
674 sq = CALLOC_STRUCT(svga_query);
675 if (!sq)
676 goto fail;
677
678 /* Allocate an integer ID for the query */
679 sq->id = util_bitmask_add(svga->query_id_bm);
680 if (sq->id == UTIL_BITMASK_INVALID_INDEX)
681 goto fail;
682
683 SVGA_DBG(DEBUG_QUERY, "%s type=%d sq=0x%x id=%d\n", __FUNCTION__,
684 query_type, sq, sq->id);
685
686 switch (query_type) {
687 case PIPE_QUERY_OCCLUSION_COUNTER:
688 sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSION;
689 if (svga_have_vgpu10(svga)) {
690 define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionQueryResult));
691
692 /**
693 * In OpenGL, occlusion counter query can be used in conditional
694 * rendering; however, in DX10, only OCCLUSION_PREDICATE query can
695 * be used for predication. Hence, we need to create an occlusion
696 * predicate query along with the occlusion counter query. So when
697 * the occlusion counter query is used for predication, the associated
698 * query of occlusion predicate type will be used
699 * in the SetPredication command.
700 */
701 sq->predicate = svga_create_query(pipe, PIPE_QUERY_OCCLUSION_PREDICATE, index);
702
703 } else {
704 define_query_vgpu9(svga, sq);
705 }
706 break;
707 case PIPE_QUERY_OCCLUSION_PREDICATE:
708 if (svga_have_vgpu10(svga)) {
709 sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE;
710 define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionPredicateQueryResult));
711 } else {
712 sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSION;
713 define_query_vgpu9(svga, sq);
714 }
715 break;
716 case PIPE_QUERY_PRIMITIVES_GENERATED:
717 case PIPE_QUERY_PRIMITIVES_EMITTED:
718 case PIPE_QUERY_SO_STATISTICS:
719 assert(svga_have_vgpu10(svga));
720 sq->svga_type = SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS;
721 define_query_vgpu10(svga, sq,
722 sizeof(SVGADXStreamOutStatisticsQueryResult));
723 break;
724 case PIPE_QUERY_TIMESTAMP:
725 assert(svga_have_vgpu10(svga));
726 sq->svga_type = SVGA3D_QUERYTYPE_TIMESTAMP;
727 define_query_vgpu10(svga, sq,
728 sizeof(SVGADXTimestampQueryResult));
729 break;
730 case SVGA_QUERY_NUM_DRAW_CALLS:
731 case SVGA_QUERY_NUM_FALLBACKS:
732 case SVGA_QUERY_NUM_FLUSHES:
733 case SVGA_QUERY_NUM_VALIDATIONS:
734 case SVGA_QUERY_NUM_RESOURCES_MAPPED:
735 case SVGA_QUERY_NUM_BYTES_UPLOADED:
736 case SVGA_QUERY_COMMAND_BUFFER_SIZE:
737 case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
738 case SVGA_QUERY_MEMORY_USED:
739 case SVGA_QUERY_NUM_SHADERS:
740 case SVGA_QUERY_NUM_RESOURCES:
741 case SVGA_QUERY_NUM_STATE_OBJECTS:
742 case SVGA_QUERY_NUM_SURFACE_VIEWS:
743 case SVGA_QUERY_NUM_GENERATE_MIPMAP:
744 case SVGA_QUERY_NUM_READBACKS:
745 case SVGA_QUERY_NUM_RESOURCE_UPDATES:
746 case SVGA_QUERY_NUM_BUFFER_UPLOADS:
747 case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
748 case SVGA_QUERY_NUM_CONST_UPDATES:
749 break;
750 case SVGA_QUERY_FLUSH_TIME:
751 case SVGA_QUERY_MAP_BUFFER_TIME:
752 /* These queries need os_time_get() */
753 svga->hud.uses_time = TRUE;
754 break;
755 default:
756 assert(!"unexpected query type in svga_create_query()");
757 }
758
759 sq->type = query_type;
760
761 return &sq->base;
762
763 fail:
764 FREE(sq);
765 return NULL;
766 }
767
/**
 * Destroy a query.
 * A NULL query means "destroy the context's shared gb query object and
 * all related structures" (used at context teardown).
 */
static void
svga_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct svga_query *sq;

   if (!q) {
      destroy_gb_query_obj(svga);
      return;
   }

   sq = svga_query(q);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      if (svga_have_vgpu10(svga)) {
         /* make sure to also destroy any associated predicate query */
         if (sq->predicate)
            svga_destroy_query(pipe, sq->predicate);
         destroy_query_vgpu10(svga, sq);
      } else {
         sws->buffer_destroy(sws, sq->hwbuf);
      }
      sws->fence_reference(sws, &sq->fence, NULL);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      destroy_query_vgpu10(svga, sq);
      sws->fence_reference(sws, &sq->fence, NULL);
      break;
   /* Software counters hold no device resources */
   case SVGA_QUERY_NUM_DRAW_CALLS:
   case SVGA_QUERY_NUM_FALLBACKS:
   case SVGA_QUERY_NUM_FLUSHES:
   case SVGA_QUERY_NUM_VALIDATIONS:
   case SVGA_QUERY_MAP_BUFFER_TIME:
   case SVGA_QUERY_NUM_RESOURCES_MAPPED:
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
   case SVGA_QUERY_FLUSH_TIME:
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
   case SVGA_QUERY_NUM_READBACKS:
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
   case SVGA_QUERY_NUM_CONST_UPDATES:
      /* nothing */
      break;
   default:
      assert(!"svga: unexpected query type in svga_destroy_query()");
   }

   /* Free the query id */
   util_bitmask_clear(svga->query_id_bm, sq->id);

   FREE(sq);
}
838
839
/**
 * Start a query.
 * Device-backed types emit BeginQuery commands; the software SVGA_QUERY_x
 * counters just snapshot the current HUD value into sq->begin_count (the
 * matching end snapshot is taken in svga_end_query()).
 */
static boolean
svga_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq = svga_query(q);
   enum pipe_error ret;

   assert(sq);
   assert(sq->type < SVGA_QUERY_MAX);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   /* Need to flush out buffered drawing commands so that they don't
    * get counted in the query results.
    */
   svga_hwtnl_flush_retry(svga);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      if (svga_have_vgpu10(svga)) {
         ret = begin_query_vgpu10(svga, sq);
         /* also need to start the associated occlusion predicate query */
         if (sq->predicate) {
            enum pipe_error status;
            status = begin_query_vgpu10(svga, svga_query(sq->predicate));
            assert(status == PIPE_OK);
            (void) status;
         }
      } else {
         ret = begin_query_vgpu9(svga, sq);
      }
      assert(ret == PIPE_OK);
      (void) ret;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      ret = begin_query_vgpu10(svga, sq);
      assert(ret == PIPE_OK);
      break;
   /* Software counters: snapshot the current value */
   case SVGA_QUERY_NUM_DRAW_CALLS:
      sq->begin_count = svga->hud.num_draw_calls;
      break;
   case SVGA_QUERY_NUM_FALLBACKS:
      sq->begin_count = svga->hud.num_fallbacks;
      break;
   case SVGA_QUERY_NUM_FLUSHES:
      sq->begin_count = svga->hud.num_flushes;
      break;
   case SVGA_QUERY_NUM_VALIDATIONS:
      sq->begin_count = svga->hud.num_validations;
      break;
   case SVGA_QUERY_MAP_BUFFER_TIME:
      sq->begin_count = svga->hud.map_buffer_time;
      break;
   case SVGA_QUERY_NUM_RESOURCES_MAPPED:
      sq->begin_count = svga->hud.num_resources_mapped;
      break;
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
      sq->begin_count = svga->hud.num_bytes_uploaded;
      break;
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
      sq->begin_count = svga->hud.command_buffer_size;
      break;
   case SVGA_QUERY_FLUSH_TIME:
      sq->begin_count = svga->hud.flush_time;
      break;
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
      sq->begin_count = svga->hud.surface_write_flushes;
      break;
   case SVGA_QUERY_NUM_READBACKS:
      sq->begin_count = svga->hud.num_readbacks;
      break;
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
      sq->begin_count = svga->hud.num_resource_updates;
      break;
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
      sq->begin_count = svga->hud.num_buffer_uploads;
      break;
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
      sq->begin_count = svga->hud.num_const_buf_updates;
      break;
   case SVGA_QUERY_NUM_CONST_UPDATES:
      sq->begin_count = svga->hud.num_const_updates;
      break;
   /* These types report an absolute value; no begin snapshot needed */
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
      /* nothing */
      break;
   default:
      assert(!"unexpected query type in svga_begin_query()");
   }

   /* Remember the active query of this type */
   svga->sq[sq->type] = sq;

   return true;
}
945
946
/**
 * End a query.
 * For device-backed types this emits EndQuery commands; for software
 * SVGA_QUERY_x counters it captures the end snapshot matching the
 * begin_count taken in svga_begin_query().
 * TIMESTAMP queries may legally be ended without an explicit begin, so
 * one is issued implicitly in that case.
 */
static bool
svga_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq = svga_query(q);
   enum pipe_error ret;

   assert(sq);
   assert(sq->type < SVGA_QUERY_MAX);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   /* TIMESTAMP has no begin in gallium; begin it implicitly if needed */
   if (sq->type == PIPE_QUERY_TIMESTAMP && svga->sq[sq->type] != sq)
      svga_begin_query(pipe, q);

   /* Flush buffered drawing so it is counted in this query interval */
   svga_hwtnl_flush_retry(svga);

   assert(svga->sq[sq->type] == sq);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      if (svga_have_vgpu10(svga)) {
         ret = end_query_vgpu10(svga, sq);
         /* also need to end the associated occlusion predicate query */
         if (sq->predicate) {
            enum pipe_error status;
            status = end_query_vgpu10(svga, svga_query(sq->predicate));
            assert(status == PIPE_OK);
            (void) status;
         }
      } else {
         ret = end_query_vgpu9(svga, sq);
      }
      assert(ret == PIPE_OK);
      (void) ret;
      /* TODO: Delay flushing. We don't really need to flush here, just ensure
       * that there is one flush before svga_get_query_result attempts to get
       * the result.
       */
      svga_context_flush(svga, NULL);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      ret = end_query_vgpu10(svga, sq);
      assert(ret == PIPE_OK);
      break;
   /* Software counters: capture the end snapshot */
   case SVGA_QUERY_NUM_DRAW_CALLS:
      sq->end_count = svga->hud.num_draw_calls;
      break;
   case SVGA_QUERY_NUM_FALLBACKS:
      sq->end_count = svga->hud.num_fallbacks;
      break;
   case SVGA_QUERY_NUM_FLUSHES:
      sq->end_count = svga->hud.num_flushes;
      break;
   case SVGA_QUERY_NUM_VALIDATIONS:
      sq->end_count = svga->hud.num_validations;
      break;
   case SVGA_QUERY_MAP_BUFFER_TIME:
      sq->end_count = svga->hud.map_buffer_time;
      break;
   case SVGA_QUERY_NUM_RESOURCES_MAPPED:
      sq->end_count = svga->hud.num_resources_mapped;
      break;
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
      sq->end_count = svga->hud.num_bytes_uploaded;
      break;
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
      sq->end_count = svga->hud.command_buffer_size;
      break;
   case SVGA_QUERY_FLUSH_TIME:
      sq->end_count = svga->hud.flush_time;
      break;
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
      sq->end_count = svga->hud.surface_write_flushes;
      break;
   case SVGA_QUERY_NUM_READBACKS:
      sq->end_count = svga->hud.num_readbacks;
      break;
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
      sq->end_count = svga->hud.num_resource_updates;
      break;
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
      sq->end_count = svga->hud.num_buffer_uploads;
      break;
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
      sq->end_count = svga->hud.num_const_buf_updates;
      break;
   case SVGA_QUERY_NUM_CONST_UPDATES:
      sq->end_count = svga->hud.num_const_updates;
      break;
   /* These types report an absolute value; no end snapshot needed */
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
      /* nothing */
      break;
   default:
      assert(!"unexpected query type in svga_end_query()");
   }
   /* This query type is no longer active */
   svga->sq[sq->type] = NULL;
   return true;
}
1057
1058
1059 static boolean
1060 svga_get_query_result(struct pipe_context *pipe,
1061 struct pipe_query *q,
1062 boolean wait,
1063 union pipe_query_result *vresult)
1064 {
1065 struct svga_screen *svgascreen = svga_screen(pipe->screen);
1066 struct svga_context *svga = svga_context(pipe);
1067 struct svga_query *sq = svga_query(q);
1068 uint64_t *result = (uint64_t *)vresult;
1069 boolean ret = TRUE;
1070
1071 assert(sq);
1072
1073 SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d wait: %d\n",
1074 __FUNCTION__, sq, sq->id, wait);
1075
1076 switch (sq->type) {
1077 case PIPE_QUERY_OCCLUSION_COUNTER:
1078 if (svga_have_vgpu10(svga)) {
1079 SVGADXOcclusionQueryResult occResult;
1080 ret = get_query_result_vgpu10(svga, sq, wait,
1081 (void *)&occResult, sizeof(occResult));
1082 *result = (uint64_t)occResult.samplesRendered;
1083 } else {
1084 ret = get_query_result_vgpu9(svga, sq, wait, result);
1085 }
1086 break;
1087 case PIPE_QUERY_OCCLUSION_PREDICATE: {
1088 if (svga_have_vgpu10(svga)) {
1089 SVGADXOcclusionPredicateQueryResult occResult;
1090 ret = get_query_result_vgpu10(svga, sq, wait,
1091 (void *)&occResult, sizeof(occResult));
1092 vresult->b = occResult.anySamplesRendered != 0;
1093 } else {
1094 uint64_t count;
1095 ret = get_query_result_vgpu9(svga, sq, wait, &count);
1096 vresult->b = count != 0;
1097 }
1098 break;
1099 }
1100 case PIPE_QUERY_SO_STATISTICS: {
1101 SVGADXStreamOutStatisticsQueryResult sResult;
1102 struct pipe_query_data_so_statistics *pResult =
1103 (struct pipe_query_data_so_statistics *)vresult;
1104
1105 assert(svga_have_vgpu10(svga));
1106 ret = get_query_result_vgpu10(svga, sq, wait,
1107 (void *)&sResult, sizeof(sResult));
1108 pResult->num_primitives_written = sResult.numPrimitivesWritten;
1109 pResult->primitives_storage_needed = sResult.numPrimitivesRequired;
1110 break;
1111 }
1112 case PIPE_QUERY_TIMESTAMP: {
1113 SVGADXTimestampQueryResult sResult;
1114
1115 assert(svga_have_vgpu10(svga));
1116 ret = get_query_result_vgpu10(svga, sq, wait,
1117 (void *)&sResult, sizeof(sResult));
1118 *result = (uint64_t)sResult.timestamp;
1119 break;
1120 }
1121 case PIPE_QUERY_PRIMITIVES_GENERATED: {
1122 SVGADXStreamOutStatisticsQueryResult sResult;
1123
1124 assert(svga_have_vgpu10(svga));
1125 ret = get_query_result_vgpu10(svga, sq, wait,
1126 (void *)&sResult, sizeof sResult);
1127 *result = (uint64_t)sResult.numPrimitivesRequired;
1128 break;
1129 }
1130 case PIPE_QUERY_PRIMITIVES_EMITTED: {
1131 SVGADXStreamOutStatisticsQueryResult sResult;
1132
1133 assert(svga_have_vgpu10(svga));
1134 ret = get_query_result_vgpu10(svga, sq, wait,
1135 (void *)&sResult, sizeof sResult);
1136 *result = (uint64_t)sResult.numPrimitivesWritten;
1137 break;
1138 }
1139 /* These are per-frame counters */
1140 case SVGA_QUERY_NUM_DRAW_CALLS:
1141 case SVGA_QUERY_NUM_FALLBACKS:
1142 case SVGA_QUERY_NUM_FLUSHES:
1143 case SVGA_QUERY_NUM_VALIDATIONS:
1144 case SVGA_QUERY_MAP_BUFFER_TIME:
1145 case SVGA_QUERY_NUM_RESOURCES_MAPPED:
1146 case SVGA_QUERY_NUM_BYTES_UPLOADED:
1147 case SVGA_QUERY_COMMAND_BUFFER_SIZE:
1148 case SVGA_QUERY_FLUSH_TIME:
1149 case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
1150 case SVGA_QUERY_NUM_READBACKS:
1151 case SVGA_QUERY_NUM_RESOURCE_UPDATES:
1152 case SVGA_QUERY_NUM_BUFFER_UPLOADS:
1153 case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
1154 case SVGA_QUERY_NUM_CONST_UPDATES:
1155 vresult->u64 = sq->end_count - sq->begin_count;
1156 break;
1157 /* These are running total counters */
1158 case SVGA_QUERY_MEMORY_USED:
1159 vresult->u64 = svgascreen->hud.total_resource_bytes;
1160 break;
1161 case SVGA_QUERY_NUM_SHADERS:
1162 vresult->u64 = svga->hud.num_shaders;
1163 break;
1164 case SVGA_QUERY_NUM_RESOURCES:
1165 vresult->u64 = svgascreen->hud.num_resources;
1166 break;
1167 case SVGA_QUERY_NUM_STATE_OBJECTS:
1168 vresult->u64 = (svga->hud.num_blend_objects +
1169 svga->hud.num_depthstencil_objects +
1170 svga->hud.num_rasterizer_objects +
1171 svga->hud.num_sampler_objects +
1172 svga->hud.num_samplerview_objects +
1173 svga->hud.num_vertexelement_objects);
1174 break;
1175 case SVGA_QUERY_NUM_SURFACE_VIEWS:
1176 vresult->u64 = svga->hud.num_surface_views;
1177 break;
1178 case SVGA_QUERY_NUM_GENERATE_MIPMAP:
1179 vresult->u64 = svga->hud.num_generate_mipmap;
1180 break;
1181 default:
1182 assert(!"unexpected query type in svga_get_query_result");
1183 }
1184
1185 SVGA_DBG(DEBUG_QUERY, "%s result %d\n", __FUNCTION__, *((uint64_t *)vresult));
1186
1187 return ret;
1188 }
1189
1190 static void
1191 svga_render_condition(struct pipe_context *pipe, struct pipe_query *q,
1192 boolean condition, uint mode)
1193 {
1194 struct svga_context *svga = svga_context(pipe);
1195 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
1196 struct svga_query *sq = svga_query(q);
1197 SVGA3dQueryId queryId;
1198 enum pipe_error ret;
1199
1200 SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);
1201
1202 assert(svga_have_vgpu10(svga));
1203 if (sq == NULL) {
1204 queryId = SVGA3D_INVALID_ID;
1205 }
1206 else {
1207 assert(sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION ||
1208 sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE);
1209
1210 if (sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION) {
1211 assert(sq->predicate);
1212 /**
1213 * For conditional rendering, make sure to use the associated
1214 * predicate query.
1215 */
1216 sq = svga_query(sq->predicate);
1217 }
1218 queryId = sq->id;
1219
1220 if ((mode == PIPE_RENDER_COND_WAIT ||
1221 mode == PIPE_RENDER_COND_BY_REGION_WAIT) && sq->fence) {
1222 sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
1223 }
1224 }
1225 /*
1226 * if the kernel module doesn't support the predication command,
1227 * we'll just render unconditionally.
1228 * This is probably acceptable for the typical case of occlusion culling.
1229 */
1230 if (sws->have_set_predication_cmd) {
1231 ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
1232 (uint32) condition);
1233 if (ret != PIPE_OK) {
1234 svga_context_flush(svga, NULL);
1235 ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
1236 (uint32) condition);
1237 }
1238 }
1239 }
1240
1241
1242 /*
1243 * This function is a workaround because we lack the ability to query
1244 * renderer's time synchornously.
1245 */
1246 static uint64_t
1247 svga_get_timestamp(struct pipe_context *pipe)
1248 {
1249 struct pipe_query *q = svga_create_query(pipe, PIPE_QUERY_TIMESTAMP, 0);
1250 union pipe_query_result result;
1251
1252 svga_begin_query(pipe, q);
1253 svga_end_query(pipe,q);
1254 svga_get_query_result(pipe, q, TRUE, &result);
1255 svga_destroy_query(pipe, q);
1256
1257 return result.u64;
1258 }
1259
1260
/**
 * Intentional no-op: this driver takes no action when queries are
 * (de)activated, but the pipe_context interface requires the hook
 * to be implemented.
 */
static void
svga_set_active_query_state(struct pipe_context *pipe, boolean enable)
{
}
1265
1266
1267 void
1268 svga_init_query_functions(struct svga_context *svga)
1269 {
1270 svga->pipe.create_query = svga_create_query;
1271 svga->pipe.destroy_query = svga_destroy_query;
1272 svga->pipe.begin_query = svga_begin_query;
1273 svga->pipe.end_query = svga_end_query;
1274 svga->pipe.get_query_result = svga_get_query_result;
1275 svga->pipe.set_active_query_state = svga_set_active_query_state;
1276 svga->pipe.render_condition = svga_render_condition;
1277 svga->pipe.get_timestamp = svga_get_timestamp;
1278 }