svga: Fix failures caused in fedora 24
[mesa.git] / src / gallium / drivers / svga / svga_pipe_query.c
1 /**********************************************************
2 * Copyright 2008-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 #include "pipe/p_state.h"
27 #include "pipe/p_context.h"
28
29 #include "util/u_bitmask.h"
30 #include "util/u_memory.h"
31
32 #include "svga_cmd.h"
33 #include "svga_context.h"
34 #include "svga_screen.h"
35 #include "svga_resource_buffer.h"
36 #include "svga_winsys.h"
37 #include "svga_debug.h"
38
39
/* Fixme: want a public base class for all pipe structs, even if there
 * isn't much in them.
 */
struct pipe_query {
   int dummy;   /* never read; placeholder so the struct has a member */
};
46
/**
 * SVGA query object, wrapping the device-independent pipe_query base.
 * Depending on the device generation, either the VGPU9 fields (hwbuf,
 * queryResult) or the VGPU10 fields (gb_query, flags, offset, predicate)
 * are used.  The non-GPU SVGA_QUERY_x counters only use
 * begin_count/end_count.
 */
struct svga_query {
   struct pipe_query base;
   unsigned type;                  /**< PIPE_QUERY_x or SVGA_QUERY_x */
   SVGA3dQueryType svga_type;      /**< SVGA3D_QUERYTYPE_x or unused */

   unsigned id;                    /** Per-context query identifier */

   /** Fence from the flush that submitted this query's commands */
   struct pipe_fence_handle *fence;

   /** For PIPE_QUERY_OCCLUSION_COUNTER / SVGA3D_QUERYTYPE_OCCLUSION */

   /* For VGPU9 */
   struct svga_winsys_buffer *hwbuf;
   /* Host writes the result here; the buffer is pinned and assumed always
    * mapped, so the host may update it asynchronously -- hence volatile. */
   volatile SVGA3dQueryResult *queryResult;

   /** For VGPU10 */
   struct svga_winsys_gb_query *gb_query;
   SVGA3dDXQueryFlags flags;
   unsigned offset;                /**< offset to the gb_query memory */
   struct pipe_query *predicate;   /** The associated query that can be used for predicate */

   /** For non-GPU SVGA_QUERY_x queries */
   uint64_t begin_count, end_count;
};
71
72
/** Down-cast a pipe_query pointer to the driver-private svga_query. */
static inline struct svga_query *
svga_query(struct pipe_query *q)
{
   struct svga_query *sq = (struct svga_query *) q;
   return sq;
}
79
80 /**
81 * VGPU9
82 */
83
84 static boolean
85 svga_get_query_result(struct pipe_context *pipe,
86 struct pipe_query *q,
87 boolean wait,
88 union pipe_query_result *result);
89
90 static enum pipe_error
91 define_query_vgpu9(struct svga_context *svga,
92 struct svga_query *sq)
93 {
94 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
95
96 sq->hwbuf = svga_winsys_buffer_create(svga, 1,
97 SVGA_BUFFER_USAGE_PINNED,
98 sizeof *sq->queryResult);
99 if (!sq->hwbuf)
100 return PIPE_ERROR_OUT_OF_MEMORY;
101
102 sq->queryResult = (SVGA3dQueryResult *)
103 sws->buffer_map(sws, sq->hwbuf, PIPE_TRANSFER_WRITE);
104 if (!sq->queryResult) {
105 sws->buffer_destroy(sws, sq->hwbuf);
106 return PIPE_ERROR_OUT_OF_MEMORY;
107 }
108
109 sq->queryResult->totalSize = sizeof *sq->queryResult;
110 sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
111
112 /* We request the buffer to be pinned and assume it is always mapped.
113 * The reason is that we don't want to wait for fences when checking the
114 * query status.
115 */
116 sws->buffer_unmap(sws, sq->hwbuf);
117
118 return PIPE_OK;
119 }
120
/**
 * Start a VGPU9 query: make sure no previous result is still pending,
 * reset the result state to NEW and emit the BeginQuery command
 * (retrying once after a flush if the command buffer was full).
 */
static enum pipe_error
begin_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret = PIPE_OK;

   if (sq->queryResult->state == SVGA3D_QUERYSTATE_PENDING) {
      /* The application doesn't care for the pending query result.
       * We cannot let go of the existing buffer and just get a new one
       * because its storage may be reused for other purposes and clobbered
       * by the host when it determines the query result.  So the only
       * option here is to wait for the existing query's result -- not a
       * big deal, given that no sane application would do this.
       */
      uint64_t result;
      /* NOTE(review): &result is passed as union pipe_query_result*;
       * this relies on the occlusion path writing only a single uint64.
       * Verify no path writes the full (larger) union here. */
      svga_get_query_result(&svga->pipe, &sq->base, TRUE, (void*)&result);
      assert(sq->queryResult->state != SVGA3D_QUERYSTATE_PENDING);
   }

   sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
   sws->fence_reference(sws, &sq->fence, NULL);

   ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
   if (ret != PIPE_OK) {
      /* Command buffer full: flush and retry once. */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
   }
   return ret;
}
150
151 static enum pipe_error
152 end_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
153 {
154 enum pipe_error ret = PIPE_OK;
155
156 /* Set to PENDING before sending EndQuery. */
157 sq->queryResult->state = SVGA3D_QUERYSTATE_PENDING;
158
159 ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
160 if (ret != PIPE_OK) {
161 svga_context_flush(svga, NULL);
162 ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
163 }
164 return ret;
165 }
166
/**
 * Fetch the result of a VGPU9 query.
 *
 * \param wait    if TRUE, block until the host produced the result
 * \param result  receives the 32-bit result widened to 64 bits
 * \return TRUE if a result was returned, FALSE if the query is still
 *         pending and \p wait was FALSE
 */
static boolean
get_query_result_vgpu9(struct svga_context *svga, struct svga_query *sq,
                       boolean wait, uint64_t *result)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret;
   SVGA3dQueryState state;

   if (!sq->fence) {
      /* The query status won't be updated by the host unless
       * SVGA_3D_CMD_WAIT_FOR_QUERY is emitted. Unfortunately this will cause
       * a synchronous wait on the host.
       */
      ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
      if (ret != PIPE_OK) {
         /* Command buffer full: flush and retry once. */
         svga_context_flush(svga, NULL);
         ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
      }
      assert (ret == PIPE_OK);
      /* Flush and keep a fence so we can later wait for completion. */
      svga_context_flush(svga, &sq->fence);
      assert(sq->fence);
   }

   state = sq->queryResult->state;
   if (state == SVGA3D_QUERYSTATE_PENDING) {
      if (!wait)
         return FALSE;
      /* Block until the submitted commands (incl. WaitForQuery) finish. */
      sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
      state = sq->queryResult->state;
   }

   assert(state == SVGA3D_QUERYSTATE_SUCCEEDED ||
          state == SVGA3D_QUERYSTATE_FAILED);

   *result = (uint64_t)sq->queryResult->result32;
   return TRUE;
}
204
205
206 /**
207 * VGPU10
208 *
209 * There is one query mob allocated for each context to be shared by all
210 * query types. The mob is used to hold queries's state and result. Since
211 * each query result type is of different length, to ease the query allocation
212 * management, the mob is divided into memory blocks. Each memory block
213 * will hold queries of the same type. Multiple memory blocks can be allocated
214 * for a particular query type.
215 *
216 * Currently each memory block is of 184 bytes. We support up to 128
217 * memory blocks. The query memory size is arbitrary right now.
218 * Each occlusion query takes about 8 bytes. One memory block can accomodate
219 * 23 occlusion queries. 128 of those blocks can support up to 2944 occlusion
220 * queries. That seems reasonable for now. If we think this limit is
221 * not enough, we can increase the limit or try to grow the mob in runtime.
222 * Note, SVGA device does not impose one mob per context for queries,
223 * we could allocate multiple mobs for queries; however, wddm KMD does not
224 * currently support that.
225 *
226 * Also note that the GL guest driver does not issue any of the
227 * following commands: DXMoveQuery, DXBindAllQuery & DXReadbackAllQuery.
228 */
229 #define SVGA_QUERY_MEM_BLOCK_SIZE (sizeof(SVGADXQueryResultUnion) * 2)
230 #define SVGA_QUERY_MEM_SIZE (128 * SVGA_QUERY_MEM_BLOCK_SIZE)
231
/**
 * Bookkeeping for one SVGA_QUERY_MEM_BLOCK_SIZE-byte block within the
 * per-context query mob.  Blocks holding the same query type are chained
 * through \c next; \c alloc_mask tracks which fixed-size slots are in use.
 */
struct svga_qmem_alloc_entry
{
   unsigned start_offset;               /* start offset of the memory block */
   unsigned block_index;                /* block index of the memory block */
   unsigned query_size;                 /* query size in this memory block */
   unsigned nquery;                     /* number of queries allocated */
   struct util_bitmask *alloc_mask;     /* allocation mask */
   struct svga_qmem_alloc_entry *next;  /* next memory block */
};
241
242
243 /**
244 * Allocate a memory block from the query object memory
245 * \return -1 if out of memory, else index of the query memory block
246 */
247 static int
248 allocate_query_block(struct svga_context *svga)
249 {
250 int index;
251 unsigned offset;
252
253 /* Find the next available query block */
254 index = util_bitmask_add(svga->gb_query_alloc_mask);
255
256 if (index == UTIL_BITMASK_INVALID_INDEX)
257 return -1;
258
259 offset = index * SVGA_QUERY_MEM_BLOCK_SIZE;
260 if (offset >= svga->gb_query_len) {
261 unsigned i;
262
263 /**
264 * All the memory blocks are allocated, lets see if there is
265 * any empty memory block around that can be freed up.
266 */
267 index = -1;
268 for (i = 0; i < SVGA_QUERY_MAX && index == -1; i++) {
269 struct svga_qmem_alloc_entry *alloc_entry;
270 struct svga_qmem_alloc_entry *prev_alloc_entry = NULL;
271
272 alloc_entry = svga->gb_query_map[i];
273 while (alloc_entry && index == -1) {
274 if (alloc_entry->nquery == 0) {
275 /* This memory block is empty, it can be recycled. */
276 if (prev_alloc_entry) {
277 prev_alloc_entry->next = alloc_entry->next;
278 } else {
279 svga->gb_query_map[i] = alloc_entry->next;
280 }
281 index = alloc_entry->block_index;
282 } else {
283 prev_alloc_entry = alloc_entry;
284 alloc_entry = alloc_entry->next;
285 }
286 }
287 }
288 }
289
290 return index;
291 }
292
293 /**
294 * Allocate a slot in the specified memory block.
295 * All slots in this memory block are of the same size.
296 *
297 * \return -1 if out of memory, else index of the query slot
298 */
299 static int
300 allocate_query_slot(struct svga_context *svga,
301 struct svga_qmem_alloc_entry *alloc)
302 {
303 int index;
304 unsigned offset;
305
306 /* Find the next available slot */
307 index = util_bitmask_add(alloc->alloc_mask);
308
309 if (index == UTIL_BITMASK_INVALID_INDEX)
310 return -1;
311
312 offset = index * alloc->query_size;
313 if (offset >= SVGA_QUERY_MEM_BLOCK_SIZE)
314 return -1;
315
316 alloc->nquery++;
317
318 return index;
319 }
320
/**
 * Deallocate the specified slot in the memory block.
 * If all slots are freed up, then deallocate the memory block
 * as well, so it can be allocated for other query type
 */
static void
deallocate_query_slot(struct svga_context *svga,
                      struct svga_qmem_alloc_entry *alloc,
                      unsigned index)
{
   assert(index != UTIL_BITMASK_INVALID_INDEX);

   /* Mark the slot free and drop the occupancy count. */
   util_bitmask_clear(alloc->alloc_mask, index);
   alloc->nquery--;

   /**
    * Don't worry about deallocating the empty memory block here.
    * The empty memory block will be recycled when no more memory block
    * can be allocated.
    */
}
342
343 static struct svga_qmem_alloc_entry *
344 allocate_query_block_entry(struct svga_context *svga,
345 unsigned len)
346 {
347 struct svga_qmem_alloc_entry *alloc_entry;
348 int block_index = -1;
349
350 block_index = allocate_query_block(svga);
351 if (block_index == -1)
352 return NULL;
353 alloc_entry = CALLOC_STRUCT(svga_qmem_alloc_entry);
354 if (!alloc_entry)
355 return NULL;
356
357 alloc_entry->block_index = block_index;
358 alloc_entry->start_offset = block_index * SVGA_QUERY_MEM_BLOCK_SIZE;
359 alloc_entry->nquery = 0;
360 alloc_entry->alloc_mask = util_bitmask_create();
361 alloc_entry->next = NULL;
362 alloc_entry->query_size = len;
363
364 return alloc_entry;
365 }
366
367 /**
368 * Allocate a memory slot for a query of the specified type.
369 * It will first search through the memory blocks that are allocated
370 * for the query type. If no memory slot is available, it will try
371 * to allocate another memory block within the query object memory for
372 * this query type.
373 */
374 static int
375 allocate_query(struct svga_context *svga,
376 SVGA3dQueryType type,
377 unsigned len)
378 {
379 struct svga_qmem_alloc_entry *alloc_entry;
380 int slot_index = -1;
381 unsigned offset;
382
383 assert(type < SVGA_QUERY_MAX);
384
385 alloc_entry = svga->gb_query_map[type];
386
387 if (!alloc_entry) {
388 /**
389 * No query memory block has been allocated for this query type,
390 * allocate one now
391 */
392 alloc_entry = allocate_query_block_entry(svga, len);
393 if (!alloc_entry)
394 return -1;
395 svga->gb_query_map[type] = alloc_entry;
396 }
397
398 /* Allocate a slot within the memory block allocated for this query type */
399 slot_index = allocate_query_slot(svga, alloc_entry);
400
401 if (slot_index == -1) {
402 /* This query memory block is full, allocate another one */
403 alloc_entry = allocate_query_block_entry(svga, len);
404 if (!alloc_entry)
405 return -1;
406 alloc_entry->next = svga->gb_query_map[type];
407 svga->gb_query_map[type] = alloc_entry;
408 slot_index = allocate_query_slot(svga, alloc_entry);
409 }
410
411 assert(slot_index != -1);
412 offset = slot_index * len + alloc_entry->start_offset;
413
414 return offset;
415 }
416
417
418 /**
419 * Deallocate memory slot allocated for the specified query
420 */
421 static void
422 deallocate_query(struct svga_context *svga,
423 struct svga_query *sq)
424 {
425 struct svga_qmem_alloc_entry *alloc_entry;
426 unsigned slot_index;
427 unsigned offset = sq->offset;
428
429 alloc_entry = svga->gb_query_map[sq->svga_type];
430
431 while (alloc_entry) {
432 if (offset >= alloc_entry->start_offset &&
433 offset < alloc_entry->start_offset + SVGA_QUERY_MEM_BLOCK_SIZE) {
434
435 /* The slot belongs to this memory block, deallocate it */
436 slot_index = (offset - alloc_entry->start_offset) /
437 alloc_entry->query_size;
438 deallocate_query_slot(svga, alloc_entry, slot_index);
439 alloc_entry = NULL;
440 } else {
441 alloc_entry = alloc_entry->next;
442 }
443 }
444 }
445
446
447 /**
448 * Destroy the gb query object and all the related query structures
449 */
450 static void
451 destroy_gb_query_obj(struct svga_context *svga)
452 {
453 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
454 unsigned i;
455
456 for (i = 0; i < SVGA_QUERY_MAX; i++) {
457 struct svga_qmem_alloc_entry *alloc_entry, *next;
458 alloc_entry = svga->gb_query_map[i];
459 while (alloc_entry) {
460 next = alloc_entry->next;
461 util_bitmask_destroy(alloc_entry->alloc_mask);
462 FREE(alloc_entry);
463 alloc_entry = next;
464 }
465 svga->gb_query_map[i] = NULL;
466 }
467
468 if (svga->gb_query)
469 sws->query_destroy(sws, svga->gb_query);
470 svga->gb_query = NULL;
471
472 util_bitmask_destroy(svga->gb_query_alloc_mask);
473 }
474
475 /**
476 * Define query and create the gb query object if it is not already created.
477 * There is only one gb query object per context which will be shared by
478 * queries of all types.
479 */
480 static enum pipe_error
481 define_query_vgpu10(struct svga_context *svga,
482 struct svga_query *sq, int resultLen)
483 {
484 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
485 int qlen;
486 enum pipe_error ret = PIPE_OK;
487
488 SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);
489
490 if (svga->gb_query == NULL) {
491 /* Create a gb query object */
492 svga->gb_query = sws->query_create(sws, SVGA_QUERY_MEM_SIZE);
493 if (!svga->gb_query)
494 return PIPE_ERROR_OUT_OF_MEMORY;
495 svga->gb_query_len = SVGA_QUERY_MEM_SIZE;
496 memset (svga->gb_query_map, 0, sizeof(svga->gb_query_map));
497 svga->gb_query_alloc_mask = util_bitmask_create();
498
499 /* Bind the query object to the context */
500 if (svga->swc->query_bind(svga->swc, svga->gb_query,
501 SVGA_QUERY_FLAG_SET) != PIPE_OK) {
502 svga_context_flush(svga, NULL);
503 svga->swc->query_bind(svga->swc, svga->gb_query,
504 SVGA_QUERY_FLAG_SET);
505 }
506 }
507
508 sq->gb_query = svga->gb_query;
509
510 /* Allocate an integer ID for this query */
511 sq->id = util_bitmask_add(svga->query_id_bm);
512 if (sq->id == UTIL_BITMASK_INVALID_INDEX)
513 return PIPE_ERROR_OUT_OF_MEMORY;
514
515 /* Find a slot for this query in the gb object */
516 qlen = resultLen + sizeof(SVGA3dQueryState);
517 sq->offset = allocate_query(svga, sq->svga_type, qlen);
518 if (sq->offset == -1)
519 return PIPE_ERROR_OUT_OF_MEMORY;
520
521 SVGA_DBG(DEBUG_QUERY, " query type=%d qid=0x%x offset=%d\n",
522 sq->svga_type, sq->id, sq->offset);
523
524 /**
525 * Send SVGA3D commands to define the query
526 */
527 ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
528 if (ret != PIPE_OK) {
529 svga_context_flush(svga, NULL);
530 ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
531 }
532 if (ret != PIPE_OK)
533 return PIPE_ERROR_OUT_OF_MEMORY;
534
535 ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
536 if (ret != PIPE_OK) {
537 svga_context_flush(svga, NULL);
538 ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
539 }
540 assert(ret == PIPE_OK);
541
542 ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
543 if (ret != PIPE_OK) {
544 svga_context_flush(svga, NULL);
545 ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
546 }
547 assert(ret == PIPE_OK);
548
549 return PIPE_OK;
550 }
551
552 static enum pipe_error
553 destroy_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
554 {
555 enum pipe_error ret;
556
557 ret = SVGA3D_vgpu10_DestroyQuery(svga->swc, sq->id);
558
559 /* Deallocate the memory slot allocated for this query */
560 deallocate_query(svga, sq);
561
562 return ret;
563 }
564
565
566 /**
567 * Rebind queryies to the context.
568 */
569 static void
570 rebind_vgpu10_query(struct svga_context *svga)
571 {
572 if (svga->swc->query_bind(svga->swc, svga->gb_query,
573 SVGA_QUERY_FLAG_REF) != PIPE_OK) {
574 svga_context_flush(svga, NULL);
575 svga->swc->query_bind(svga->swc, svga->gb_query,
576 SVGA_QUERY_FLAG_REF);
577 }
578
579 svga->rebind.flags.query = FALSE;
580 }
581
582
583 static enum pipe_error
584 begin_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
585 {
586 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
587 enum pipe_error ret = PIPE_OK;
588 int status = 0;
589
590 sws->fence_reference(sws, &sq->fence, NULL);
591
592 /* Initialize the query state to NEW */
593 status = sws->query_init(sws, sq->gb_query, sq->offset, SVGA3D_QUERYSTATE_NEW);
594 if (status)
595 return PIPE_ERROR;
596
597 if (svga->rebind.flags.query) {
598 rebind_vgpu10_query(svga);
599 }
600
601 /* Send the BeginQuery command to the device */
602 ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
603 if (ret != PIPE_OK) {
604 svga_context_flush(svga, NULL);
605 ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
606 }
607 return ret;
608 }
609
/**
 * End a VGPU10 query: emit the EndQuery command and then wait on the
 * fence so the result is complete before a possible re-begin.
 */
static enum pipe_error
end_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   enum pipe_error ret = PIPE_OK;

   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
   if (ret != PIPE_OK) {
      /* Command buffer full: flush and retry once. */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
   }

   /* Finish fence is copied here from get_query_result_vgpu10. This helps
    * with cases where svga_begin_query might be called again before
    * svga_get_query_result, such as GL_TIME_ELAPSED.
    */
   if (!sq->fence) {
      svga_context_flush(svga, &sq->fence);
   }
   sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);

   return ret;
}
637
/**
 * Read back the result of a VGPU10 query from the gb query mob.
 *
 * \param wait       if TRUE, wait on the query's fence until a result is ready
 * \param result     receives resultLen bytes of query result data
 * \return TRUE if a result was written, FALSE if still pending and
 *         \p wait was FALSE
 */
static boolean
get_query_result_vgpu10(struct svga_context *svga, struct svga_query *sq,
                        boolean wait, void *result, int resultLen)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   SVGA3dQueryState queryState;

   if (svga->rebind.flags.query) {
      rebind_vgpu10_query(svga);
   }

   sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);

   if (queryState == SVGA3D_QUERYSTATE_PENDING) {
      if (!wait)
         return FALSE;
      /* Wait for the host to finish the query, then re-read the result. */
      sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
      sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);
   }

   assert(queryState == SVGA3D_QUERYSTATE_SUCCEEDED ||
          queryState == SVGA3D_QUERYSTATE_FAILED);

   return TRUE;
}
663
664 static struct pipe_query *
665 svga_create_query(struct pipe_context *pipe,
666 unsigned query_type,
667 unsigned index)
668 {
669 struct svga_context *svga = svga_context(pipe);
670 struct svga_query *sq;
671
672 assert(query_type < SVGA_QUERY_MAX);
673
674 sq = CALLOC_STRUCT(svga_query);
675 if (!sq)
676 goto fail;
677
678 /* Allocate an integer ID for the query */
679 sq->id = util_bitmask_add(svga->query_id_bm);
680 if (sq->id == UTIL_BITMASK_INVALID_INDEX)
681 goto fail;
682
683 SVGA_DBG(DEBUG_QUERY, "%s type=%d sq=0x%x id=%d\n", __FUNCTION__,
684 query_type, sq, sq->id);
685
686 switch (query_type) {
687 case PIPE_QUERY_OCCLUSION_COUNTER:
688 sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSION;
689 if (svga_have_vgpu10(svga)) {
690 define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionQueryResult));
691
692 /**
693 * In OpenGL, occlusion counter query can be used in conditional
694 * rendering; however, in DX10, only OCCLUSION_PREDICATE query can
695 * be used for predication. Hence, we need to create an occlusion
696 * predicate query along with the occlusion counter query. So when
697 * the occlusion counter query is used for predication, the associated
698 * query of occlusion predicate type will be used
699 * in the SetPredication command.
700 */
701 sq->predicate = svga_create_query(pipe, PIPE_QUERY_OCCLUSION_PREDICATE, index);
702
703 } else {
704 define_query_vgpu9(svga, sq);
705 }
706 break;
707 case PIPE_QUERY_OCCLUSION_PREDICATE:
708 if (svga_have_vgpu10(svga)) {
709 sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE;
710 define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionPredicateQueryResult));
711 } else {
712 sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSION;
713 define_query_vgpu9(svga, sq);
714 }
715 break;
716 case PIPE_QUERY_PRIMITIVES_GENERATED:
717 case PIPE_QUERY_PRIMITIVES_EMITTED:
718 case PIPE_QUERY_SO_STATISTICS:
719 assert(svga_have_vgpu10(svga));
720 sq->svga_type = SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS;
721 define_query_vgpu10(svga, sq,
722 sizeof(SVGADXStreamOutStatisticsQueryResult));
723 break;
724 case PIPE_QUERY_TIMESTAMP:
725 assert(svga_have_vgpu10(svga));
726 sq->svga_type = SVGA3D_QUERYTYPE_TIMESTAMP;
727 define_query_vgpu10(svga, sq,
728 sizeof(SVGADXTimestampQueryResult));
729 break;
730 case SVGA_QUERY_NUM_DRAW_CALLS:
731 case SVGA_QUERY_NUM_FALLBACKS:
732 case SVGA_QUERY_NUM_FLUSHES:
733 case SVGA_QUERY_NUM_VALIDATIONS:
734 case SVGA_QUERY_MAP_BUFFER_TIME:
735 case SVGA_QUERY_NUM_RESOURCES_MAPPED:
736 case SVGA_QUERY_NUM_BYTES_UPLOADED:
737 case SVGA_QUERY_COMMAND_BUFFER_SIZE:
738 case SVGA_QUERY_FLUSH_TIME:
739 case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
740 case SVGA_QUERY_MEMORY_USED:
741 case SVGA_QUERY_NUM_SHADERS:
742 case SVGA_QUERY_NUM_RESOURCES:
743 case SVGA_QUERY_NUM_STATE_OBJECTS:
744 case SVGA_QUERY_NUM_SURFACE_VIEWS:
745 case SVGA_QUERY_NUM_GENERATE_MIPMAP:
746 case SVGA_QUERY_NUM_READBACKS:
747 case SVGA_QUERY_NUM_RESOURCE_UPDATES:
748 case SVGA_QUERY_NUM_BUFFER_UPLOADS:
749 case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
750 case SVGA_QUERY_NUM_CONST_UPDATES:
751 break;
752 default:
753 assert(!"unexpected query type in svga_create_query()");
754 }
755
756 sq->type = query_type;
757
758 return &sq->base;
759
760 fail:
761 FREE(sq);
762 return NULL;
763 }
764
/**
 * Destroy a query, releasing its device resources, fence and id.
 * When \p q is NULL this instead tears down the per-context gb query
 * object and all of its bookkeeping (used at context destruction).
 */
static void
svga_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct svga_query *sq;

   if (!q) {
      /* Special case: release the shared gb query object. */
      destroy_gb_query_obj(svga);
      return;
   }

   sq = svga_query(q);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      if (svga_have_vgpu10(svga)) {
         /* make sure to also destroy any associated predicate query */
         if (sq->predicate)
            svga_destroy_query(pipe, sq->predicate);
         destroy_query_vgpu10(svga, sq);
      } else {
         sws->buffer_destroy(sws, sq->hwbuf);
      }
      sws->fence_reference(sws, &sq->fence, NULL);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      destroy_query_vgpu10(svga, sq);
      sws->fence_reference(sws, &sq->fence, NULL);
      break;
   case SVGA_QUERY_NUM_DRAW_CALLS:
   case SVGA_QUERY_NUM_FALLBACKS:
   case SVGA_QUERY_NUM_FLUSHES:
   case SVGA_QUERY_NUM_VALIDATIONS:
   case SVGA_QUERY_MAP_BUFFER_TIME:
   case SVGA_QUERY_NUM_RESOURCES_MAPPED:
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
   case SVGA_QUERY_FLUSH_TIME:
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
   case SVGA_QUERY_NUM_READBACKS:
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
   case SVGA_QUERY_NUM_CONST_UPDATES:
      /* nothing */
      break;
   default:
      assert(!"svga: unexpected query type in svga_destroy_query()");
   }

   /* Free the query id */
   util_bitmask_clear(svga->query_id_bm, sq->id);

   FREE(sq);
}
835
836
/**
 * Start a query: GPU-backed types emit the appropriate Begin command;
 * software (HUD) counters merely snapshot the current counter into
 * begin_count.  The query is recorded in svga->sq[] so end/get can
 * validate pairing.
 */
static boolean
svga_begin_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq = svga_query(q);
   enum pipe_error ret;

   assert(sq);
   assert(sq->type < SVGA_QUERY_MAX);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   /* Need to flush out buffered drawing commands so that they don't
    * get counted in the query results.
    */
   svga_hwtnl_flush_retry(svga);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      if (svga_have_vgpu10(svga)) {
         ret = begin_query_vgpu10(svga, sq);
         /* also need to start the associated occlusion predicate query */
         if (sq->predicate) {
            enum pipe_error status;
            status = begin_query_vgpu10(svga, svga_query(sq->predicate));
            assert(status == PIPE_OK);
            (void) status;
         }
      } else {
         ret = begin_query_vgpu9(svga, sq);
      }
      assert(ret == PIPE_OK);
      (void) ret;
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      ret = begin_query_vgpu10(svga, sq);
      assert(ret == PIPE_OK);
      break;
   /* Software counters: snapshot the running HUD value. */
   case SVGA_QUERY_NUM_DRAW_CALLS:
      sq->begin_count = svga->hud.num_draw_calls;
      break;
   case SVGA_QUERY_NUM_FALLBACKS:
      sq->begin_count = svga->hud.num_fallbacks;
      break;
   case SVGA_QUERY_NUM_FLUSHES:
      sq->begin_count = svga->hud.num_flushes;
      break;
   case SVGA_QUERY_NUM_VALIDATIONS:
      sq->begin_count = svga->hud.num_validations;
      break;
   case SVGA_QUERY_MAP_BUFFER_TIME:
      sq->begin_count = svga->hud.map_buffer_time;
      break;
   case SVGA_QUERY_NUM_RESOURCES_MAPPED:
      sq->begin_count = svga->hud.num_resources_mapped;
      break;
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
      sq->begin_count = svga->hud.num_bytes_uploaded;
      break;
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
      sq->begin_count = svga->hud.command_buffer_size;
      break;
   case SVGA_QUERY_FLUSH_TIME:
      sq->begin_count = svga->hud.flush_time;
      break;
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
      sq->begin_count = svga->hud.surface_write_flushes;
      break;
   case SVGA_QUERY_NUM_READBACKS:
      sq->begin_count = svga->hud.num_readbacks;
      break;
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
      sq->begin_count = svga->hud.num_resource_updates;
      break;
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
      sq->begin_count = svga->hud.num_buffer_uploads;
      break;
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
      sq->begin_count = svga->hud.num_const_buf_updates;
      break;
   case SVGA_QUERY_NUM_CONST_UPDATES:
      sq->begin_count = svga->hud.num_const_updates;
      break;
   /* Cumulative counters report the current value; no snapshot needed. */
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
      /* nothing */
      break;
   default:
      assert(!"unexpected query type in svga_begin_query()");
   }

   /* Remember the active query of this type. */
   svga->sq[sq->type] = sq;

   return true;
}
942
943
/**
 * End a query: GPU-backed types emit the End command (and flush so a
 * result can later be fetched); software counters snapshot the current
 * HUD value into end_count.  TIMESTAMP queries may be ended without an
 * explicit begin, so one is issued implicitly in that case.
 */
static bool
svga_end_query(struct pipe_context *pipe, struct pipe_query *q)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_query *sq = svga_query(q);
   enum pipe_error ret;

   assert(sq);
   assert(sq->type < SVGA_QUERY_MAX);

   SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
            sq, sq->id);

   /* GL_TIMESTAMP is queried without a begin; start it implicitly. */
   if (sq->type == PIPE_QUERY_TIMESTAMP && svga->sq[sq->type] != sq)
      svga_begin_query(pipe, q);

   svga_hwtnl_flush_retry(svga);

   assert(svga->sq[sq->type] == sq);

   switch (sq->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
      if (svga_have_vgpu10(svga)) {
         ret = end_query_vgpu10(svga, sq);
         /* also need to end the associated occlusion predicate query */
         if (sq->predicate) {
            enum pipe_error status;
            status = end_query_vgpu10(svga, svga_query(sq->predicate));
            assert(status == PIPE_OK);
            (void) status;
         }
      } else {
         ret = end_query_vgpu9(svga, sq);
      }
      assert(ret == PIPE_OK);
      (void) ret;
      /* TODO: Delay flushing. We don't really need to flush here, just ensure
       * that there is one flush before svga_get_query_result attempts to get
       * the result.
       */
      svga_context_flush(svga, NULL);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
   case PIPE_QUERY_SO_STATISTICS:
   case PIPE_QUERY_TIMESTAMP:
      assert(svga_have_vgpu10(svga));
      ret = end_query_vgpu10(svga, sq);
      assert(ret == PIPE_OK);
      break;
   /* Software counters: snapshot the running HUD value. */
   case SVGA_QUERY_NUM_DRAW_CALLS:
      sq->end_count = svga->hud.num_draw_calls;
      break;
   case SVGA_QUERY_NUM_FALLBACKS:
      sq->end_count = svga->hud.num_fallbacks;
      break;
   case SVGA_QUERY_NUM_FLUSHES:
      sq->end_count = svga->hud.num_flushes;
      break;
   case SVGA_QUERY_NUM_VALIDATIONS:
      sq->end_count = svga->hud.num_validations;
      break;
   case SVGA_QUERY_MAP_BUFFER_TIME:
      sq->end_count = svga->hud.map_buffer_time;
      break;
   case SVGA_QUERY_NUM_RESOURCES_MAPPED:
      sq->end_count = svga->hud.num_resources_mapped;
      break;
   case SVGA_QUERY_NUM_BYTES_UPLOADED:
      sq->end_count = svga->hud.num_bytes_uploaded;
      break;
   case SVGA_QUERY_COMMAND_BUFFER_SIZE:
      sq->end_count = svga->hud.command_buffer_size;
      break;
   case SVGA_QUERY_FLUSH_TIME:
      sq->end_count = svga->hud.flush_time;
      break;
   case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
      sq->end_count = svga->hud.surface_write_flushes;
      break;
   case SVGA_QUERY_NUM_READBACKS:
      sq->end_count = svga->hud.num_readbacks;
      break;
   case SVGA_QUERY_NUM_RESOURCE_UPDATES:
      sq->end_count = svga->hud.num_resource_updates;
      break;
   case SVGA_QUERY_NUM_BUFFER_UPLOADS:
      sq->end_count = svga->hud.num_buffer_uploads;
      break;
   case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
      sq->end_count = svga->hud.num_const_buf_updates;
      break;
   case SVGA_QUERY_NUM_CONST_UPDATES:
      sq->end_count = svga->hud.num_const_updates;
      break;
   /* Cumulative counters report the current value; no snapshot needed. */
   case SVGA_QUERY_MEMORY_USED:
   case SVGA_QUERY_NUM_SHADERS:
   case SVGA_QUERY_NUM_RESOURCES:
   case SVGA_QUERY_NUM_STATE_OBJECTS:
   case SVGA_QUERY_NUM_SURFACE_VIEWS:
   case SVGA_QUERY_NUM_GENERATE_MIPMAP:
      /* nothing */
      break;
   default:
      assert(!"unexpected query type in svga_end_query()");
   }
   /* No query of this type is active any more. */
   svga->sq[sq->type] = NULL;
   return true;
}
1054
1055
1056 static boolean
1057 svga_get_query_result(struct pipe_context *pipe,
1058 struct pipe_query *q,
1059 boolean wait,
1060 union pipe_query_result *vresult)
1061 {
1062 struct svga_screen *svgascreen = svga_screen(pipe->screen);
1063 struct svga_context *svga = svga_context(pipe);
1064 struct svga_query *sq = svga_query(q);
1065 uint64_t *result = (uint64_t *)vresult;
1066 boolean ret = TRUE;
1067
1068 assert(sq);
1069
1070 SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d wait: %d\n",
1071 __FUNCTION__, sq, sq->id, wait);
1072
1073 switch (sq->type) {
1074 case PIPE_QUERY_OCCLUSION_COUNTER:
1075 if (svga_have_vgpu10(svga)) {
1076 SVGADXOcclusionQueryResult occResult;
1077 ret = get_query_result_vgpu10(svga, sq, wait,
1078 (void *)&occResult, sizeof(occResult));
1079 *result = (uint64_t)occResult.samplesRendered;
1080 } else {
1081 ret = get_query_result_vgpu9(svga, sq, wait, result);
1082 }
1083 break;
1084 case PIPE_QUERY_OCCLUSION_PREDICATE: {
1085 if (svga_have_vgpu10(svga)) {
1086 SVGADXOcclusionPredicateQueryResult occResult;
1087 ret = get_query_result_vgpu10(svga, sq, wait,
1088 (void *)&occResult, sizeof(occResult));
1089 vresult->b = occResult.anySamplesRendered != 0;
1090 } else {
1091 uint64_t count;
1092 ret = get_query_result_vgpu9(svga, sq, wait, &count);
1093 vresult->b = count != 0;
1094 }
1095 break;
1096 }
1097 case PIPE_QUERY_SO_STATISTICS: {
1098 SVGADXStreamOutStatisticsQueryResult sResult;
1099 struct pipe_query_data_so_statistics *pResult =
1100 (struct pipe_query_data_so_statistics *)vresult;
1101
1102 assert(svga_have_vgpu10(svga));
1103 ret = get_query_result_vgpu10(svga, sq, wait,
1104 (void *)&sResult, sizeof(sResult));
1105 pResult->num_primitives_written = sResult.numPrimitivesWritten;
1106 pResult->primitives_storage_needed = sResult.numPrimitivesRequired;
1107 break;
1108 }
1109 case PIPE_QUERY_TIMESTAMP: {
1110 SVGADXTimestampQueryResult sResult;
1111
1112 assert(svga_have_vgpu10(svga));
1113 ret = get_query_result_vgpu10(svga, sq, wait,
1114 (void *)&sResult, sizeof(sResult));
1115 *result = (uint64_t)sResult.timestamp;
1116 break;
1117 }
1118 case PIPE_QUERY_PRIMITIVES_GENERATED: {
1119 SVGADXStreamOutStatisticsQueryResult sResult;
1120
1121 assert(svga_have_vgpu10(svga));
1122 ret = get_query_result_vgpu10(svga, sq, wait,
1123 (void *)&sResult, sizeof sResult);
1124 *result = (uint64_t)sResult.numPrimitivesRequired;
1125 break;
1126 }
1127 case PIPE_QUERY_PRIMITIVES_EMITTED: {
1128 SVGADXStreamOutStatisticsQueryResult sResult;
1129
1130 assert(svga_have_vgpu10(svga));
1131 ret = get_query_result_vgpu10(svga, sq, wait,
1132 (void *)&sResult, sizeof sResult);
1133 *result = (uint64_t)sResult.numPrimitivesWritten;
1134 break;
1135 }
1136 /* These are per-frame counters */
1137 case SVGA_QUERY_NUM_DRAW_CALLS:
1138 case SVGA_QUERY_NUM_FALLBACKS:
1139 case SVGA_QUERY_NUM_FLUSHES:
1140 case SVGA_QUERY_NUM_VALIDATIONS:
1141 case SVGA_QUERY_MAP_BUFFER_TIME:
1142 case SVGA_QUERY_NUM_RESOURCES_MAPPED:
1143 case SVGA_QUERY_NUM_BYTES_UPLOADED:
1144 case SVGA_QUERY_COMMAND_BUFFER_SIZE:
1145 case SVGA_QUERY_FLUSH_TIME:
1146 case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
1147 case SVGA_QUERY_NUM_READBACKS:
1148 case SVGA_QUERY_NUM_RESOURCE_UPDATES:
1149 case SVGA_QUERY_NUM_BUFFER_UPLOADS:
1150 case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
1151 case SVGA_QUERY_NUM_CONST_UPDATES:
1152 vresult->u64 = sq->end_count - sq->begin_count;
1153 break;
1154 /* These are running total counters */
1155 case SVGA_QUERY_MEMORY_USED:
1156 vresult->u64 = svgascreen->hud.total_resource_bytes;
1157 break;
1158 case SVGA_QUERY_NUM_SHADERS:
1159 vresult->u64 = svga->hud.num_shaders;
1160 break;
1161 case SVGA_QUERY_NUM_RESOURCES:
1162 vresult->u64 = svgascreen->hud.num_resources;
1163 break;
1164 case SVGA_QUERY_NUM_STATE_OBJECTS:
1165 vresult->u64 = (svga->hud.num_blend_objects +
1166 svga->hud.num_depthstencil_objects +
1167 svga->hud.num_rasterizer_objects +
1168 svga->hud.num_sampler_objects +
1169 svga->hud.num_samplerview_objects +
1170 svga->hud.num_vertexelement_objects);
1171 break;
1172 case SVGA_QUERY_NUM_SURFACE_VIEWS:
1173 vresult->u64 = svga->hud.num_surface_views;
1174 break;
1175 case SVGA_QUERY_NUM_GENERATE_MIPMAP:
1176 vresult->u64 = svga->hud.num_generate_mipmap;
1177 break;
1178 default:
1179 assert(!"unexpected query type in svga_get_query_result");
1180 }
1181
1182 SVGA_DBG(DEBUG_QUERY, "%s result %d\n", __FUNCTION__, *((uint64_t *)vresult));
1183
1184 return ret;
1185 }
1186
1187 static void
1188 svga_render_condition(struct pipe_context *pipe, struct pipe_query *q,
1189 boolean condition, uint mode)
1190 {
1191 struct svga_context *svga = svga_context(pipe);
1192 struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
1193 struct svga_query *sq = svga_query(q);
1194 SVGA3dQueryId queryId;
1195 enum pipe_error ret;
1196
1197 SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);
1198
1199 assert(svga_have_vgpu10(svga));
1200 if (sq == NULL) {
1201 queryId = SVGA3D_INVALID_ID;
1202 }
1203 else {
1204 assert(sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION ||
1205 sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE);
1206
1207 if (sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION) {
1208 assert(sq->predicate);
1209 /**
1210 * For conditional rendering, make sure to use the associated
1211 * predicate query.
1212 */
1213 sq = svga_query(sq->predicate);
1214 }
1215 queryId = sq->id;
1216
1217 if ((mode == PIPE_RENDER_COND_WAIT ||
1218 mode == PIPE_RENDER_COND_BY_REGION_WAIT) && sq->fence) {
1219 sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
1220 }
1221 }
1222 /*
1223 * if the kernel module doesn't support the predication command,
1224 * we'll just render unconditionally.
1225 * This is probably acceptable for the typical case of occlusion culling.
1226 */
1227 if (sws->have_set_predication_cmd) {
1228 ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
1229 (uint32) condition);
1230 if (ret != PIPE_OK) {
1231 svga_context_flush(svga, NULL);
1232 ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
1233 (uint32) condition);
1234 }
1235 }
1236 }
1237
1238
1239 /*
1240 * This function is a workaround because we lack the ability to query
1241 * renderer's time synchornously.
1242 */
1243 static uint64_t
1244 svga_get_timestamp(struct pipe_context *pipe)
1245 {
1246 struct pipe_query *q = svga_create_query(pipe, PIPE_QUERY_TIMESTAMP, 0);
1247 union pipe_query_result result;
1248
1249 svga_begin_query(pipe, q);
1250 svga_end_query(pipe,q);
1251 svga_get_query_result(pipe, q, TRUE, &result);
1252 svga_destroy_query(pipe, q);
1253
1254 return result.u64;
1255 }
1256
1257
/**
 * The pipe_context::set_active_query_state() hook.
 * Intentionally a no-op: the svga driver keeps no global query
 * enable/disable state, so there is nothing to toggle here.
 */
static void
svga_set_active_query_state(struct pipe_context *pipe, boolean enable)
{
}
1262
1263
1264 void
1265 svga_init_query_functions(struct svga_context *svga)
1266 {
1267 svga->pipe.create_query = svga_create_query;
1268 svga->pipe.destroy_query = svga_destroy_query;
1269 svga->pipe.begin_query = svga_begin_query;
1270 svga->pipe.end_query = svga_end_query;
1271 svga->pipe.get_query_result = svga_get_query_result;
1272 svga->pipe.set_active_query_state = svga_set_active_query_state;
1273 svga->pipe.render_condition = svga_render_condition;
1274 svga->pipe.get_timestamp = svga_get_timestamp;
1275 }