1 /**********************************************************
2 * Copyright 2008-2015 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 **********************************************************/
26 #include "pipe/p_state.h"
27 #include "pipe/p_context.h"
29 #include "util/u_bitmask.h"
30 #include "util/u_memory.h"
33 #include "svga_context.h"
34 #include "svga_screen.h"
35 #include "svga_resource_buffer.h"
36 #include "svga_winsys.h"
37 #include "svga_debug.h"
40 /* Fixme: want a public base class for all pipe structs, even if there
48 struct pipe_query base
;
49 unsigned type
; /**< PIPE_QUERY_x or SVGA_QUERY_x */
50 SVGA3dQueryType svga_type
; /**< SVGA3D_QUERYTYPE_x or unused */
52 unsigned id
; /** Per-context query identifier */
53 boolean active
; /** TRUE if query is active */
55 struct pipe_fence_handle
*fence
;
57 /** For PIPE_QUERY_OCCLUSION_COUNTER / SVGA3D_QUERYTYPE_OCCLUSION */
60 struct svga_winsys_buffer
*hwbuf
;
61 volatile SVGA3dQueryResult
*queryResult
;
64 struct svga_winsys_gb_query
*gb_query
;
65 SVGA3dDXQueryFlags flags
;
66 unsigned offset
; /**< offset to the gb_query memory */
67 struct pipe_query
*predicate
; /** The associated query that can be used for predicate */
69 /** For non-GPU SVGA_QUERY_x queries */
70 uint64_t begin_count
, end_count
;
/** Cast wrapper: pipe_query -> driver-private svga_query. */
static inline struct svga_query *
svga_query(struct pipe_query *q)
{
   return (struct svga_query *)q;
}
86 svga_get_query_result(struct pipe_context
*pipe
,
89 union pipe_query_result
*result
);
91 static enum pipe_error
92 define_query_vgpu9(struct svga_context
*svga
,
93 struct svga_query
*sq
)
95 struct svga_winsys_screen
*sws
= svga_screen(svga
->pipe
.screen
)->sws
;
97 sq
->hwbuf
= svga_winsys_buffer_create(svga
, 1,
98 SVGA_BUFFER_USAGE_PINNED
,
99 sizeof *sq
->queryResult
);
101 return PIPE_ERROR_OUT_OF_MEMORY
;
103 sq
->queryResult
= (SVGA3dQueryResult
*)
104 sws
->buffer_map(sws
, sq
->hwbuf
, PIPE_TRANSFER_WRITE
);
105 if (!sq
->queryResult
) {
106 sws
->buffer_destroy(sws
, sq
->hwbuf
);
107 return PIPE_ERROR_OUT_OF_MEMORY
;
110 sq
->queryResult
->totalSize
= sizeof *sq
->queryResult
;
111 sq
->queryResult
->state
= SVGA3D_QUERYSTATE_NEW
;
113 /* We request the buffer to be pinned and assume it is always mapped.
114 * The reason is that we don't want to wait for fences when checking the
117 sws
->buffer_unmap(sws
, sq
->hwbuf
);
123 begin_query_vgpu9(struct svga_context
*svga
, struct svga_query
*sq
)
125 struct svga_winsys_screen
*sws
= svga_screen(svga
->pipe
.screen
)->sws
;
127 if (sq
->queryResult
->state
== SVGA3D_QUERYSTATE_PENDING
) {
128 /* The application doesn't care for the pending query result.
129 * We cannot let go of the existing buffer and just get a new one
130 * because its storage may be reused for other purposes and clobbered
131 * by the host when it determines the query result. So the only
132 * option here is to wait for the existing query's result -- not a
133 * big deal, given that no sane application would do this.
136 svga_get_query_result(&svga
->pipe
, &sq
->base
, TRUE
, (void*)&result
);
137 assert(sq
->queryResult
->state
!= SVGA3D_QUERYSTATE_PENDING
);
140 sq
->queryResult
->state
= SVGA3D_QUERYSTATE_NEW
;
141 sws
->fence_reference(sws
, &sq
->fence
, NULL
);
143 SVGA_RETRY(svga
, SVGA3D_BeginQuery(svga
->swc
, sq
->svga_type
));
147 end_query_vgpu9(struct svga_context
*svga
, struct svga_query
*sq
)
149 /* Set to PENDING before sending EndQuery. */
150 sq
->queryResult
->state
= SVGA3D_QUERYSTATE_PENDING
;
152 SVGA_RETRY(svga
, SVGA3D_EndQuery(svga
->swc
, sq
->svga_type
, sq
->hwbuf
));
156 get_query_result_vgpu9(struct svga_context
*svga
, struct svga_query
*sq
,
157 bool wait
, uint64_t *result
)
159 struct svga_winsys_screen
*sws
= svga_screen(svga
->pipe
.screen
)->sws
;
160 SVGA3dQueryState state
;
163 /* The query status won't be updated by the host unless
164 * SVGA_3D_CMD_WAIT_FOR_QUERY is emitted. Unfortunately this will cause
165 * a synchronous wait on the host.
167 SVGA_RETRY(svga
, SVGA3D_WaitForQuery(svga
->swc
, sq
->svga_type
,
169 svga_context_flush(svga
, &sq
->fence
);
173 state
= sq
->queryResult
->state
;
174 if (state
== SVGA3D_QUERYSTATE_PENDING
) {
177 sws
->fence_finish(sws
, sq
->fence
, PIPE_TIMEOUT_INFINITE
,
178 SVGA_FENCE_FLAG_QUERY
);
179 state
= sq
->queryResult
->state
;
182 assert(state
== SVGA3D_QUERYSTATE_SUCCEEDED
||
183 state
== SVGA3D_QUERYSTATE_FAILED
);
185 *result
= (uint64_t)sq
->queryResult
->result32
;
/**
 * VGPU10 query memory layout.
 *
 * There is one query mob allocated for each context to be shared by all
 * query types. The mob is used to hold queries's state and result. Since
 * each query result type is of different length, to ease the query allocation
 * management, the mob is divided into memory blocks. Each memory block
 * will hold queries of the same type. Multiple memory blocks can be allocated
 * for a particular query type.
 *
 * Currently each memory block is of 184 bytes. We support up to 512
 * memory blocks. The query memory size is arbitrary right now.
 * Each occlusion query takes about 8 bytes. One memory block can accommodate
 * 23 occlusion queries. 512 of those blocks can support up to 11K occlusion
 * queries. That seems reasonable for now. If we think this limit is
 * not enough, we can increase the limit or try to grow the mob in runtime.
 * Note, SVGA device does not impose one mob per context for queries,
 * we could allocate multiple mobs for queries; however, wddm KMD does not
 * currently support that.
 *
 * Also note that the GL guest driver does not issue any of the
 * following commands: DXMoveQuery, DXBindAllQuery & DXReadbackAllQuery.
 */
#define SVGA_QUERY_MEM_BLOCK_SIZE (sizeof(SVGADXQueryResultUnion) * 2)
#define SVGA_QUERY_MEM_SIZE (512 * SVGA_QUERY_MEM_BLOCK_SIZE)
/**
 * Bookkeeping for one fixed-size memory block carved out of the shared
 * query mob.  Blocks holding the same query type are chained via 'next'.
 */
struct svga_qmem_alloc_entry {
   unsigned start_offset;               /* start offset of the memory block */
   unsigned block_index;                /* block index of the memory block */
   unsigned query_size;                 /* query size in this memory block */
   unsigned nquery;                     /* number of queries allocated */
   struct util_bitmask *alloc_mask;     /* allocation mask */
   struct svga_qmem_alloc_entry *next;  /* next memory block */
};
228 * Allocate a memory block from the query object memory
229 * \return NULL if out of memory, else pointer to the query memory block
231 static struct svga_qmem_alloc_entry
*
232 allocate_query_block(struct svga_context
*svga
)
236 struct svga_qmem_alloc_entry
*alloc_entry
= NULL
;
238 /* Find the next available query block */
239 index
= util_bitmask_add(svga
->gb_query_alloc_mask
);
241 if (index
== UTIL_BITMASK_INVALID_INDEX
)
244 offset
= index
* SVGA_QUERY_MEM_BLOCK_SIZE
;
245 if (offset
>= svga
->gb_query_len
) {
248 /* Deallocate the out-of-range index */
249 util_bitmask_clear(svga
->gb_query_alloc_mask
, index
);
253 * All the memory blocks are allocated, lets see if there is
254 * any empty memory block around that can be freed up.
256 for (i
= 0; i
< SVGA3D_QUERYTYPE_MAX
&& index
== -1; i
++) {
257 struct svga_qmem_alloc_entry
*prev_alloc_entry
= NULL
;
259 alloc_entry
= svga
->gb_query_map
[i
];
260 while (alloc_entry
&& index
== -1) {
261 if (alloc_entry
->nquery
== 0) {
262 /* This memory block is empty, it can be recycled. */
263 if (prev_alloc_entry
) {
264 prev_alloc_entry
->next
= alloc_entry
->next
;
266 svga
->gb_query_map
[i
] = alloc_entry
->next
;
268 index
= alloc_entry
->block_index
;
270 prev_alloc_entry
= alloc_entry
;
271 alloc_entry
= alloc_entry
->next
;
277 debug_printf("Query memory object is full\n");
284 alloc_entry
= CALLOC_STRUCT(svga_qmem_alloc_entry
);
285 alloc_entry
->block_index
= index
;
292 * Allocate a slot in the specified memory block.
293 * All slots in this memory block are of the same size.
295 * \return -1 if out of memory, else index of the query slot
298 allocate_query_slot(struct svga_context
*svga
,
299 struct svga_qmem_alloc_entry
*alloc
)
304 /* Find the next available slot */
305 index
= util_bitmask_add(alloc
->alloc_mask
);
307 if (index
== UTIL_BITMASK_INVALID_INDEX
)
310 offset
= index
* alloc
->query_size
;
311 if (offset
>= SVGA_QUERY_MEM_BLOCK_SIZE
)
320 * Deallocate the specified slot in the memory block.
321 * If all slots are freed up, then deallocate the memory block
322 * as well, so it can be allocated for other query type
325 deallocate_query_slot(struct svga_context
*svga
,
326 struct svga_qmem_alloc_entry
*alloc
,
329 assert(index
!= UTIL_BITMASK_INVALID_INDEX
);
331 util_bitmask_clear(alloc
->alloc_mask
, index
);
335 * Don't worry about deallocating the empty memory block here.
336 * The empty memory block will be recycled when no more memory block
341 static struct svga_qmem_alloc_entry
*
342 allocate_query_block_entry(struct svga_context
*svga
,
345 struct svga_qmem_alloc_entry
*alloc_entry
;
347 alloc_entry
= allocate_query_block(svga
);
351 assert(alloc_entry
->block_index
!= -1);
352 alloc_entry
->start_offset
=
353 alloc_entry
->block_index
* SVGA_QUERY_MEM_BLOCK_SIZE
;
354 alloc_entry
->nquery
= 0;
355 alloc_entry
->alloc_mask
= util_bitmask_create();
356 alloc_entry
->next
= NULL
;
357 alloc_entry
->query_size
= len
;
363 * Allocate a memory slot for a query of the specified type.
364 * It will first search through the memory blocks that are allocated
365 * for the query type. If no memory slot is available, it will try
366 * to allocate another memory block within the query object memory for
370 allocate_query(struct svga_context
*svga
,
371 SVGA3dQueryType type
,
374 struct svga_qmem_alloc_entry
*alloc_entry
;
378 assert(type
< SVGA3D_QUERYTYPE_MAX
);
380 alloc_entry
= svga
->gb_query_map
[type
];
384 * No query memory block has been allocated for this query type,
387 alloc_entry
= allocate_query_block_entry(svga
, len
);
390 svga
->gb_query_map
[type
] = alloc_entry
;
393 /* Allocate a slot within the memory block allocated for this query type */
394 slot_index
= allocate_query_slot(svga
, alloc_entry
);
396 if (slot_index
== -1) {
397 /* This query memory block is full, allocate another one */
398 alloc_entry
= allocate_query_block_entry(svga
, len
);
401 alloc_entry
->next
= svga
->gb_query_map
[type
];
402 svga
->gb_query_map
[type
] = alloc_entry
;
403 slot_index
= allocate_query_slot(svga
, alloc_entry
);
406 assert(slot_index
!= -1);
407 offset
= slot_index
* len
+ alloc_entry
->start_offset
;
414 * Deallocate memory slot allocated for the specified query
417 deallocate_query(struct svga_context
*svga
,
418 struct svga_query
*sq
)
420 struct svga_qmem_alloc_entry
*alloc_entry
;
422 unsigned offset
= sq
->offset
;
424 alloc_entry
= svga
->gb_query_map
[sq
->svga_type
];
426 while (alloc_entry
) {
427 if (offset
>= alloc_entry
->start_offset
&&
428 offset
< alloc_entry
->start_offset
+ SVGA_QUERY_MEM_BLOCK_SIZE
) {
430 /* The slot belongs to this memory block, deallocate it */
431 slot_index
= (offset
- alloc_entry
->start_offset
) /
432 alloc_entry
->query_size
;
433 deallocate_query_slot(svga
, alloc_entry
, slot_index
);
436 alloc_entry
= alloc_entry
->next
;
443 * Destroy the gb query object and all the related query structures
446 destroy_gb_query_obj(struct svga_context
*svga
)
448 struct svga_winsys_screen
*sws
= svga_screen(svga
->pipe
.screen
)->sws
;
451 for (i
= 0; i
< SVGA3D_QUERYTYPE_MAX
; i
++) {
452 struct svga_qmem_alloc_entry
*alloc_entry
, *next
;
453 alloc_entry
= svga
->gb_query_map
[i
];
454 while (alloc_entry
) {
455 next
= alloc_entry
->next
;
456 util_bitmask_destroy(alloc_entry
->alloc_mask
);
460 svga
->gb_query_map
[i
] = NULL
;
464 sws
->query_destroy(sws
, svga
->gb_query
);
465 svga
->gb_query
= NULL
;
467 util_bitmask_destroy(svga
->gb_query_alloc_mask
);
471 * Define query and create the gb query object if it is not already created.
472 * There is only one gb query object per context which will be shared by
473 * queries of all types.
475 static enum pipe_error
476 define_query_vgpu10(struct svga_context
*svga
,
477 struct svga_query
*sq
, int resultLen
)
479 struct svga_winsys_screen
*sws
= svga_screen(svga
->pipe
.screen
)->sws
;
481 enum pipe_error ret
= PIPE_OK
;
483 SVGA_DBG(DEBUG_QUERY
, "%s\n", __FUNCTION__
);
485 if (svga
->gb_query
== NULL
) {
486 /* Create a gb query object */
487 svga
->gb_query
= sws
->query_create(sws
, SVGA_QUERY_MEM_SIZE
);
489 return PIPE_ERROR_OUT_OF_MEMORY
;
490 svga
->gb_query_len
= SVGA_QUERY_MEM_SIZE
;
491 memset (svga
->gb_query_map
, 0, sizeof(svga
->gb_query_map
));
492 svga
->gb_query_alloc_mask
= util_bitmask_create();
494 /* Bind the query object to the context */
495 SVGA_RETRY(svga
, svga
->swc
->query_bind(svga
->swc
, svga
->gb_query
,
496 SVGA_QUERY_FLAG_SET
));
499 sq
->gb_query
= svga
->gb_query
;
501 /* Make sure query length is in multiples of 8 bytes */
502 qlen
= align(resultLen
+ sizeof(SVGA3dQueryState
), 8);
504 /* Find a slot for this query in the gb object */
505 sq
->offset
= allocate_query(svga
, sq
->svga_type
, qlen
);
506 if (sq
->offset
== -1)
507 return PIPE_ERROR_OUT_OF_MEMORY
;
509 assert((sq
->offset
& 7) == 0);
511 SVGA_DBG(DEBUG_QUERY
, " query type=%d qid=0x%x offset=%d\n",
512 sq
->svga_type
, sq
->id
, sq
->offset
);
515 * Send SVGA3D commands to define the query
517 SVGA_RETRY_OOM(svga
, ret
, SVGA3D_vgpu10_DefineQuery(svga
->swc
, sq
->id
,
521 return PIPE_ERROR_OUT_OF_MEMORY
;
523 SVGA_RETRY(svga
, SVGA3D_vgpu10_BindQuery(svga
->swc
, sq
->gb_query
, sq
->id
));
524 SVGA_RETRY(svga
, SVGA3D_vgpu10_SetQueryOffset(svga
->swc
, sq
->id
,
531 destroy_query_vgpu10(struct svga_context
*svga
, struct svga_query
*sq
)
533 SVGA_RETRY(svga
, SVGA3D_vgpu10_DestroyQuery(svga
->swc
, sq
->id
));
535 /* Deallocate the memory slot allocated for this query */
536 deallocate_query(svga
, sq
);
541 * Rebind queryies to the context.
544 rebind_vgpu10_query(struct svga_context
*svga
)
546 SVGA_RETRY(svga
, svga
->swc
->query_bind(svga
->swc
, svga
->gb_query
,
547 SVGA_QUERY_FLAG_REF
));
548 svga
->rebind
.flags
.query
= FALSE
;
552 static enum pipe_error
553 begin_query_vgpu10(struct svga_context
*svga
, struct svga_query
*sq
)
555 struct svga_winsys_screen
*sws
= svga_screen(svga
->pipe
.screen
)->sws
;
558 sws
->fence_reference(sws
, &sq
->fence
, NULL
);
560 /* Initialize the query state to NEW */
561 status
= sws
->query_init(sws
, sq
->gb_query
, sq
->offset
, SVGA3D_QUERYSTATE_NEW
);
565 if (svga
->rebind
.flags
.query
) {
566 rebind_vgpu10_query(svga
);
569 /* Send the BeginQuery command to the device */
570 SVGA_RETRY(svga
, SVGA3D_vgpu10_BeginQuery(svga
->swc
, sq
->id
));
575 end_query_vgpu10(struct svga_context
*svga
, struct svga_query
*sq
)
577 if (svga
->rebind
.flags
.query
) {
578 rebind_vgpu10_query(svga
);
581 SVGA_RETRY(svga
, SVGA3D_vgpu10_EndQuery(svga
->swc
, sq
->id
));
585 get_query_result_vgpu10(struct svga_context
*svga
, struct svga_query
*sq
,
586 bool wait
, void *result
, int resultLen
)
588 struct svga_winsys_screen
*sws
= svga_screen(svga
->pipe
.screen
)->sws
;
589 SVGA3dQueryState queryState
;
591 if (svga
->rebind
.flags
.query
) {
592 rebind_vgpu10_query(svga
);
595 sws
->query_get_result(sws
, sq
->gb_query
, sq
->offset
, &queryState
, result
, resultLen
);
597 if (queryState
!= SVGA3D_QUERYSTATE_SUCCEEDED
&& !sq
->fence
) {
598 /* We don't have the query result yet, and the query hasn't been
599 * submitted. We need to submit it now since the GL spec says
600 * "Querying the state for a given occlusion query forces that
601 * occlusion query to complete within a finite amount of time."
603 svga_context_flush(svga
, &sq
->fence
);
606 if (queryState
== SVGA3D_QUERYSTATE_PENDING
||
607 queryState
== SVGA3D_QUERYSTATE_NEW
) {
610 sws
->fence_finish(sws
, sq
->fence
, PIPE_TIMEOUT_INFINITE
,
611 SVGA_FENCE_FLAG_QUERY
);
612 sws
->query_get_result(sws
, sq
->gb_query
, sq
->offset
, &queryState
, result
, resultLen
);
615 assert(queryState
== SVGA3D_QUERYSTATE_SUCCEEDED
||
616 queryState
== SVGA3D_QUERYSTATE_FAILED
);
621 static struct pipe_query
*
622 svga_create_query(struct pipe_context
*pipe
,
626 struct svga_context
*svga
= svga_context(pipe
);
627 struct svga_query
*sq
;
630 assert(query_type
< SVGA_QUERY_MAX
);
632 sq
= CALLOC_STRUCT(svga_query
);
636 /* Allocate an integer ID for the query */
637 sq
->id
= util_bitmask_add(svga
->query_id_bm
);
638 if (sq
->id
== UTIL_BITMASK_INVALID_INDEX
)
641 SVGA_DBG(DEBUG_QUERY
, "%s type=%d sq=0x%x id=%d\n", __FUNCTION__
,
642 query_type
, sq
, sq
->id
);
644 switch (query_type
) {
645 case PIPE_QUERY_OCCLUSION_COUNTER
:
646 sq
->svga_type
= SVGA3D_QUERYTYPE_OCCLUSION
;
647 if (svga_have_vgpu10(svga
)) {
648 ret
= define_query_vgpu10(svga
, sq
,
649 sizeof(SVGADXOcclusionQueryResult
));
654 * In OpenGL, occlusion counter query can be used in conditional
655 * rendering; however, in DX10, only OCCLUSION_PREDICATE query can
656 * be used for predication. Hence, we need to create an occlusion
657 * predicate query along with the occlusion counter query. So when
658 * the occlusion counter query is used for predication, the associated
659 * query of occlusion predicate type will be used
660 * in the SetPredication command.
662 sq
->predicate
= svga_create_query(pipe
, PIPE_QUERY_OCCLUSION_PREDICATE
, index
);
665 ret
= define_query_vgpu9(svga
, sq
);
670 case PIPE_QUERY_OCCLUSION_PREDICATE
:
671 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
672 if (svga_have_vgpu10(svga
)) {
673 sq
->svga_type
= SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE
;
674 ret
= define_query_vgpu10(svga
, sq
,
675 sizeof(SVGADXOcclusionPredicateQueryResult
));
679 sq
->svga_type
= SVGA3D_QUERYTYPE_OCCLUSION
;
680 ret
= define_query_vgpu9(svga
, sq
);
685 case PIPE_QUERY_PRIMITIVES_GENERATED
:
686 case PIPE_QUERY_PRIMITIVES_EMITTED
:
687 case PIPE_QUERY_SO_STATISTICS
:
688 assert(svga_have_vgpu10(svga
));
690 /* Until the device supports the new query type for multiple streams,
691 * we will use the single stream query type for stream 0.
693 if (svga_have_sm5(svga
) && index
> 0) {
696 sq
->svga_type
= SVGA3D_QUERYTYPE_SOSTATS_STREAM0
+ index
;
700 sq
->svga_type
= SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS
;
702 ret
= define_query_vgpu10(svga
, sq
,
703 sizeof(SVGADXStreamOutStatisticsQueryResult
));
707 case PIPE_QUERY_TIMESTAMP
:
708 assert(svga_have_vgpu10(svga
));
709 sq
->svga_type
= SVGA3D_QUERYTYPE_TIMESTAMP
;
710 ret
= define_query_vgpu10(svga
, sq
,
711 sizeof(SVGADXTimestampQueryResult
));
715 case SVGA_QUERY_NUM_DRAW_CALLS
:
716 case SVGA_QUERY_NUM_FALLBACKS
:
717 case SVGA_QUERY_NUM_FLUSHES
:
718 case SVGA_QUERY_NUM_VALIDATIONS
:
719 case SVGA_QUERY_NUM_BUFFERS_MAPPED
:
720 case SVGA_QUERY_NUM_TEXTURES_MAPPED
:
721 case SVGA_QUERY_NUM_BYTES_UPLOADED
:
722 case SVGA_QUERY_NUM_COMMAND_BUFFERS
:
723 case SVGA_QUERY_COMMAND_BUFFER_SIZE
:
724 case SVGA_QUERY_SURFACE_WRITE_FLUSHES
:
725 case SVGA_QUERY_MEMORY_USED
:
726 case SVGA_QUERY_NUM_SHADERS
:
727 case SVGA_QUERY_NUM_RESOURCES
:
728 case SVGA_QUERY_NUM_STATE_OBJECTS
:
729 case SVGA_QUERY_NUM_SURFACE_VIEWS
:
730 case SVGA_QUERY_NUM_GENERATE_MIPMAP
:
731 case SVGA_QUERY_NUM_READBACKS
:
732 case SVGA_QUERY_NUM_RESOURCE_UPDATES
:
733 case SVGA_QUERY_NUM_BUFFER_UPLOADS
:
734 case SVGA_QUERY_NUM_CONST_BUF_UPDATES
:
735 case SVGA_QUERY_NUM_CONST_UPDATES
:
736 case SVGA_QUERY_NUM_FAILED_ALLOCATIONS
:
737 case SVGA_QUERY_NUM_COMMANDS_PER_DRAW
:
738 case SVGA_QUERY_NUM_SHADER_RELOCATIONS
:
739 case SVGA_QUERY_NUM_SURFACE_RELOCATIONS
:
740 case SVGA_QUERY_SHADER_MEM_USED
:
742 case SVGA_QUERY_FLUSH_TIME
:
743 case SVGA_QUERY_MAP_BUFFER_TIME
:
744 /* These queries need os_time_get() */
745 svga
->hud
.uses_time
= TRUE
;
749 assert(!"unexpected query type in svga_create_query()");
752 sq
->type
= query_type
;
762 svga_destroy_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
764 struct svga_context
*svga
= svga_context(pipe
);
765 struct svga_winsys_screen
*sws
= svga_screen(svga
->pipe
.screen
)->sws
;
766 struct svga_query
*sq
;
769 destroy_gb_query_obj(svga
);
775 SVGA_DBG(DEBUG_QUERY
, "%s sq=0x%x id=%d\n", __FUNCTION__
,
779 case PIPE_QUERY_OCCLUSION_COUNTER
:
780 case PIPE_QUERY_OCCLUSION_PREDICATE
:
781 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
782 if (svga_have_vgpu10(svga
)) {
783 /* make sure to also destroy any associated predicate query */
785 svga_destroy_query(pipe
, sq
->predicate
);
786 destroy_query_vgpu10(svga
, sq
);
788 sws
->buffer_destroy(sws
, sq
->hwbuf
);
790 sws
->fence_reference(sws
, &sq
->fence
, NULL
);
792 case PIPE_QUERY_PRIMITIVES_GENERATED
:
793 case PIPE_QUERY_PRIMITIVES_EMITTED
:
794 case PIPE_QUERY_SO_STATISTICS
:
795 case PIPE_QUERY_TIMESTAMP
:
796 assert(svga_have_vgpu10(svga
));
797 destroy_query_vgpu10(svga
, sq
);
798 sws
->fence_reference(sws
, &sq
->fence
, NULL
);
800 case SVGA_QUERY_NUM_DRAW_CALLS
:
801 case SVGA_QUERY_NUM_FALLBACKS
:
802 case SVGA_QUERY_NUM_FLUSHES
:
803 case SVGA_QUERY_NUM_VALIDATIONS
:
804 case SVGA_QUERY_MAP_BUFFER_TIME
:
805 case SVGA_QUERY_NUM_BUFFERS_MAPPED
:
806 case SVGA_QUERY_NUM_TEXTURES_MAPPED
:
807 case SVGA_QUERY_NUM_BYTES_UPLOADED
:
808 case SVGA_QUERY_NUM_COMMAND_BUFFERS
:
809 case SVGA_QUERY_COMMAND_BUFFER_SIZE
:
810 case SVGA_QUERY_FLUSH_TIME
:
811 case SVGA_QUERY_SURFACE_WRITE_FLUSHES
:
812 case SVGA_QUERY_MEMORY_USED
:
813 case SVGA_QUERY_NUM_SHADERS
:
814 case SVGA_QUERY_NUM_RESOURCES
:
815 case SVGA_QUERY_NUM_STATE_OBJECTS
:
816 case SVGA_QUERY_NUM_SURFACE_VIEWS
:
817 case SVGA_QUERY_NUM_GENERATE_MIPMAP
:
818 case SVGA_QUERY_NUM_READBACKS
:
819 case SVGA_QUERY_NUM_RESOURCE_UPDATES
:
820 case SVGA_QUERY_NUM_BUFFER_UPLOADS
:
821 case SVGA_QUERY_NUM_CONST_BUF_UPDATES
:
822 case SVGA_QUERY_NUM_CONST_UPDATES
:
823 case SVGA_QUERY_NUM_FAILED_ALLOCATIONS
:
824 case SVGA_QUERY_NUM_COMMANDS_PER_DRAW
:
825 case SVGA_QUERY_NUM_SHADER_RELOCATIONS
:
826 case SVGA_QUERY_NUM_SURFACE_RELOCATIONS
:
827 case SVGA_QUERY_SHADER_MEM_USED
:
831 assert(!"svga: unexpected query type in svga_destroy_query()");
834 /* Free the query id */
835 util_bitmask_clear(svga
->query_id_bm
, sq
->id
);
842 svga_begin_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
844 struct svga_context
*svga
= svga_context(pipe
);
845 struct svga_query
*sq
= svga_query(q
);
846 enum pipe_error ret
= PIPE_OK
;
849 assert(sq
->type
< SVGA_QUERY_MAX
);
851 /* Need to flush out buffered drawing commands so that they don't
852 * get counted in the query results.
854 svga_hwtnl_flush_retry(svga
);
857 case PIPE_QUERY_OCCLUSION_COUNTER
:
858 case PIPE_QUERY_OCCLUSION_PREDICATE
:
859 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
860 if (svga_have_vgpu10(svga
)) {
861 ret
= begin_query_vgpu10(svga
, sq
);
862 /* also need to start the associated occlusion predicate query */
864 enum pipe_error status
;
865 status
= begin_query_vgpu10(svga
, svga_query(sq
->predicate
));
866 assert(status
== PIPE_OK
);
870 begin_query_vgpu9(svga
, sq
);
872 assert(ret
== PIPE_OK
);
875 case PIPE_QUERY_PRIMITIVES_GENERATED
:
876 case PIPE_QUERY_PRIMITIVES_EMITTED
:
877 case PIPE_QUERY_SO_STATISTICS
:
878 case PIPE_QUERY_TIMESTAMP
:
879 assert(svga_have_vgpu10(svga
));
880 ret
= begin_query_vgpu10(svga
, sq
);
881 assert(ret
== PIPE_OK
);
883 case SVGA_QUERY_NUM_DRAW_CALLS
:
884 sq
->begin_count
= svga
->hud
.num_draw_calls
;
886 case SVGA_QUERY_NUM_FALLBACKS
:
887 sq
->begin_count
= svga
->hud
.num_fallbacks
;
889 case SVGA_QUERY_NUM_FLUSHES
:
890 sq
->begin_count
= svga
->hud
.num_flushes
;
892 case SVGA_QUERY_NUM_VALIDATIONS
:
893 sq
->begin_count
= svga
->hud
.num_validations
;
895 case SVGA_QUERY_MAP_BUFFER_TIME
:
896 sq
->begin_count
= svga
->hud
.map_buffer_time
;
898 case SVGA_QUERY_NUM_BUFFERS_MAPPED
:
899 sq
->begin_count
= svga
->hud
.num_buffers_mapped
;
901 case SVGA_QUERY_NUM_TEXTURES_MAPPED
:
902 sq
->begin_count
= svga
->hud
.num_textures_mapped
;
904 case SVGA_QUERY_NUM_BYTES_UPLOADED
:
905 sq
->begin_count
= svga
->hud
.num_bytes_uploaded
;
907 case SVGA_QUERY_NUM_COMMAND_BUFFERS
:
908 sq
->begin_count
= svga
->swc
->num_command_buffers
;
910 case SVGA_QUERY_COMMAND_BUFFER_SIZE
:
911 sq
->begin_count
= svga
->hud
.command_buffer_size
;
913 case SVGA_QUERY_FLUSH_TIME
:
914 sq
->begin_count
= svga
->hud
.flush_time
;
916 case SVGA_QUERY_SURFACE_WRITE_FLUSHES
:
917 sq
->begin_count
= svga
->hud
.surface_write_flushes
;
919 case SVGA_QUERY_NUM_READBACKS
:
920 sq
->begin_count
= svga
->hud
.num_readbacks
;
922 case SVGA_QUERY_NUM_RESOURCE_UPDATES
:
923 sq
->begin_count
= svga
->hud
.num_resource_updates
;
925 case SVGA_QUERY_NUM_BUFFER_UPLOADS
:
926 sq
->begin_count
= svga
->hud
.num_buffer_uploads
;
928 case SVGA_QUERY_NUM_CONST_BUF_UPDATES
:
929 sq
->begin_count
= svga
->hud
.num_const_buf_updates
;
931 case SVGA_QUERY_NUM_CONST_UPDATES
:
932 sq
->begin_count
= svga
->hud
.num_const_updates
;
934 case SVGA_QUERY_NUM_SHADER_RELOCATIONS
:
935 sq
->begin_count
= svga
->swc
->num_shader_reloc
;
937 case SVGA_QUERY_NUM_SURFACE_RELOCATIONS
:
938 sq
->begin_count
= svga
->swc
->num_surf_reloc
;
940 case SVGA_QUERY_MEMORY_USED
:
941 case SVGA_QUERY_NUM_SHADERS
:
942 case SVGA_QUERY_NUM_RESOURCES
:
943 case SVGA_QUERY_NUM_STATE_OBJECTS
:
944 case SVGA_QUERY_NUM_SURFACE_VIEWS
:
945 case SVGA_QUERY_NUM_GENERATE_MIPMAP
:
946 case SVGA_QUERY_NUM_FAILED_ALLOCATIONS
:
947 case SVGA_QUERY_NUM_COMMANDS_PER_DRAW
:
948 case SVGA_QUERY_SHADER_MEM_USED
:
952 assert(!"unexpected query type in svga_begin_query()");
955 SVGA_DBG(DEBUG_QUERY
, "%s sq=0x%x id=%d type=%d svga_type=%d\n",
956 __FUNCTION__
, sq
, sq
->id
, sq
->type
, sq
->svga_type
);
965 svga_end_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
967 struct svga_context
*svga
= svga_context(pipe
);
968 struct svga_query
*sq
= svga_query(q
);
971 assert(sq
->type
< SVGA_QUERY_MAX
);
973 SVGA_DBG(DEBUG_QUERY
, "%s sq=0x%x type=%d\n",
974 __FUNCTION__
, sq
, sq
->type
);
976 if (sq
->type
== PIPE_QUERY_TIMESTAMP
&& !sq
->active
)
977 svga_begin_query(pipe
, q
);
979 SVGA_DBG(DEBUG_QUERY
, "%s sq=0x%x id=%d type=%d svga_type=%d\n",
980 __FUNCTION__
, sq
, sq
->id
, sq
->type
, sq
->svga_type
);
982 svga_hwtnl_flush_retry(svga
);
987 case PIPE_QUERY_OCCLUSION_COUNTER
:
988 case PIPE_QUERY_OCCLUSION_PREDICATE
:
989 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
990 if (svga_have_vgpu10(svga
)) {
991 end_query_vgpu10(svga
, sq
);
992 /* also need to end the associated occlusion predicate query */
994 end_query_vgpu10(svga
, svga_query(sq
->predicate
));
997 end_query_vgpu9(svga
, sq
);
1000 case PIPE_QUERY_PRIMITIVES_GENERATED
:
1001 case PIPE_QUERY_PRIMITIVES_EMITTED
:
1002 case PIPE_QUERY_SO_STATISTICS
:
1003 case PIPE_QUERY_TIMESTAMP
:
1004 assert(svga_have_vgpu10(svga
));
1005 end_query_vgpu10(svga
, sq
);
1007 case SVGA_QUERY_NUM_DRAW_CALLS
:
1008 sq
->end_count
= svga
->hud
.num_draw_calls
;
1010 case SVGA_QUERY_NUM_FALLBACKS
:
1011 sq
->end_count
= svga
->hud
.num_fallbacks
;
1013 case SVGA_QUERY_NUM_FLUSHES
:
1014 sq
->end_count
= svga
->hud
.num_flushes
;
1016 case SVGA_QUERY_NUM_VALIDATIONS
:
1017 sq
->end_count
= svga
->hud
.num_validations
;
1019 case SVGA_QUERY_MAP_BUFFER_TIME
:
1020 sq
->end_count
= svga
->hud
.map_buffer_time
;
1022 case SVGA_QUERY_NUM_BUFFERS_MAPPED
:
1023 sq
->end_count
= svga
->hud
.num_buffers_mapped
;
1025 case SVGA_QUERY_NUM_TEXTURES_MAPPED
:
1026 sq
->end_count
= svga
->hud
.num_textures_mapped
;
1028 case SVGA_QUERY_NUM_BYTES_UPLOADED
:
1029 sq
->end_count
= svga
->hud
.num_bytes_uploaded
;
1031 case SVGA_QUERY_NUM_COMMAND_BUFFERS
:
1032 sq
->end_count
= svga
->swc
->num_command_buffers
;
1034 case SVGA_QUERY_COMMAND_BUFFER_SIZE
:
1035 sq
->end_count
= svga
->hud
.command_buffer_size
;
1037 case SVGA_QUERY_FLUSH_TIME
:
1038 sq
->end_count
= svga
->hud
.flush_time
;
1040 case SVGA_QUERY_SURFACE_WRITE_FLUSHES
:
1041 sq
->end_count
= svga
->hud
.surface_write_flushes
;
1043 case SVGA_QUERY_NUM_READBACKS
:
1044 sq
->end_count
= svga
->hud
.num_readbacks
;
1046 case SVGA_QUERY_NUM_RESOURCE_UPDATES
:
1047 sq
->end_count
= svga
->hud
.num_resource_updates
;
1049 case SVGA_QUERY_NUM_BUFFER_UPLOADS
:
1050 sq
->end_count
= svga
->hud
.num_buffer_uploads
;
1052 case SVGA_QUERY_NUM_CONST_BUF_UPDATES
:
1053 sq
->end_count
= svga
->hud
.num_const_buf_updates
;
1055 case SVGA_QUERY_NUM_CONST_UPDATES
:
1056 sq
->end_count
= svga
->hud
.num_const_updates
;
1058 case SVGA_QUERY_NUM_SHADER_RELOCATIONS
:
1059 sq
->end_count
= svga
->swc
->num_shader_reloc
;
1061 case SVGA_QUERY_NUM_SURFACE_RELOCATIONS
:
1062 sq
->end_count
= svga
->swc
->num_surf_reloc
;
1064 case SVGA_QUERY_MEMORY_USED
:
1065 case SVGA_QUERY_NUM_SHADERS
:
1066 case SVGA_QUERY_NUM_RESOURCES
:
1067 case SVGA_QUERY_NUM_STATE_OBJECTS
:
1068 case SVGA_QUERY_NUM_SURFACE_VIEWS
:
1069 case SVGA_QUERY_NUM_GENERATE_MIPMAP
:
1070 case SVGA_QUERY_NUM_FAILED_ALLOCATIONS
:
1071 case SVGA_QUERY_NUM_COMMANDS_PER_DRAW
:
1072 case SVGA_QUERY_SHADER_MEM_USED
:
1076 assert(!"unexpected query type in svga_end_query()");
1084 svga_get_query_result(struct pipe_context
*pipe
,
1085 struct pipe_query
*q
,
1087 union pipe_query_result
*vresult
)
1089 struct svga_screen
*svgascreen
= svga_screen(pipe
->screen
);
1090 struct svga_context
*svga
= svga_context(pipe
);
1091 struct svga_query
*sq
= svga_query(q
);
1092 uint64_t *result
= (uint64_t *)vresult
;
1097 SVGA_DBG(DEBUG_QUERY
, "%s sq=0x%x id=%d wait: %d\n",
1098 __FUNCTION__
, sq
, sq
->id
, wait
);
1101 case PIPE_QUERY_OCCLUSION_COUNTER
:
1102 if (svga_have_vgpu10(svga
)) {
1103 SVGADXOcclusionQueryResult occResult
;
1104 ret
= get_query_result_vgpu10(svga
, sq
, wait
,
1105 (void *)&occResult
, sizeof(occResult
));
1106 *result
= (uint64_t)occResult
.samplesRendered
;
1108 ret
= get_query_result_vgpu9(svga
, sq
, wait
, result
);
1111 case PIPE_QUERY_OCCLUSION_PREDICATE
:
1112 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
: {
1113 if (svga_have_vgpu10(svga
)) {
1114 SVGADXOcclusionPredicateQueryResult occResult
;
1115 ret
= get_query_result_vgpu10(svga
, sq
, wait
,
1116 (void *)&occResult
, sizeof(occResult
));
1117 vresult
->b
= occResult
.anySamplesRendered
!= 0;
1120 ret
= get_query_result_vgpu9(svga
, sq
, wait
, &count
);
1121 vresult
->b
= count
!= 0;
1125 case PIPE_QUERY_SO_STATISTICS
: {
1126 SVGADXStreamOutStatisticsQueryResult sResult
;
1127 struct pipe_query_data_so_statistics
*pResult
=
1128 (struct pipe_query_data_so_statistics
*)vresult
;
1130 assert(svga_have_vgpu10(svga
));
1131 ret
= get_query_result_vgpu10(svga
, sq
, wait
,
1132 (void *)&sResult
, sizeof(sResult
));
1133 pResult
->num_primitives_written
= sResult
.numPrimitivesWritten
;
1134 pResult
->primitives_storage_needed
= sResult
.numPrimitivesRequired
;
1137 case PIPE_QUERY_TIMESTAMP
: {
1138 SVGADXTimestampQueryResult sResult
;
1140 assert(svga_have_vgpu10(svga
));
1141 ret
= get_query_result_vgpu10(svga
, sq
, wait
,
1142 (void *)&sResult
, sizeof(sResult
));
1143 *result
= (uint64_t)sResult
.timestamp
;
1146 case PIPE_QUERY_PRIMITIVES_GENERATED
: {
1147 SVGADXStreamOutStatisticsQueryResult sResult
;
1149 assert(svga_have_vgpu10(svga
));
1150 ret
= get_query_result_vgpu10(svga
, sq
, wait
,
1151 (void *)&sResult
, sizeof sResult
);
1152 *result
= (uint64_t)sResult
.numPrimitivesRequired
;
1155 case PIPE_QUERY_PRIMITIVES_EMITTED
: {
1156 SVGADXStreamOutStatisticsQueryResult sResult
;
1158 assert(svga_have_vgpu10(svga
));
1159 ret
= get_query_result_vgpu10(svga
, sq
, wait
,
1160 (void *)&sResult
, sizeof sResult
);
1161 *result
= (uint64_t)sResult
.numPrimitivesWritten
;
1164 /* These are per-frame counters */
1165 case SVGA_QUERY_NUM_DRAW_CALLS
:
1166 case SVGA_QUERY_NUM_FALLBACKS
:
1167 case SVGA_QUERY_NUM_FLUSHES
:
1168 case SVGA_QUERY_NUM_VALIDATIONS
:
1169 case SVGA_QUERY_MAP_BUFFER_TIME
:
1170 case SVGA_QUERY_NUM_BUFFERS_MAPPED
:
1171 case SVGA_QUERY_NUM_TEXTURES_MAPPED
:
1172 case SVGA_QUERY_NUM_BYTES_UPLOADED
:
1173 case SVGA_QUERY_NUM_COMMAND_BUFFERS
:
1174 case SVGA_QUERY_COMMAND_BUFFER_SIZE
:
1175 case SVGA_QUERY_FLUSH_TIME
:
1176 case SVGA_QUERY_SURFACE_WRITE_FLUSHES
:
1177 case SVGA_QUERY_NUM_READBACKS
:
1178 case SVGA_QUERY_NUM_RESOURCE_UPDATES
:
1179 case SVGA_QUERY_NUM_BUFFER_UPLOADS
:
1180 case SVGA_QUERY_NUM_CONST_BUF_UPDATES
:
1181 case SVGA_QUERY_NUM_CONST_UPDATES
:
1182 case SVGA_QUERY_NUM_SHADER_RELOCATIONS
:
1183 case SVGA_QUERY_NUM_SURFACE_RELOCATIONS
:
1184 vresult
->u64
= sq
->end_count
- sq
->begin_count
;
1186 /* These are running total counters */
1187 case SVGA_QUERY_MEMORY_USED
:
1188 vresult
->u64
= svgascreen
->hud
.total_resource_bytes
;
1190 case SVGA_QUERY_NUM_SHADERS
:
1191 vresult
->u64
= svga
->hud
.num_shaders
;
1193 case SVGA_QUERY_NUM_RESOURCES
:
1194 vresult
->u64
= svgascreen
->hud
.num_resources
;
1196 case SVGA_QUERY_NUM_STATE_OBJECTS
:
1197 vresult
->u64
= (svga
->hud
.num_blend_objects
+
1198 svga
->hud
.num_depthstencil_objects
+
1199 svga
->hud
.num_rasterizer_objects
+
1200 svga
->hud
.num_sampler_objects
+
1201 svga
->hud
.num_samplerview_objects
+
1202 svga
->hud
.num_vertexelement_objects
);
1204 case SVGA_QUERY_NUM_SURFACE_VIEWS
:
1205 vresult
->u64
= svga
->hud
.num_surface_views
;
1207 case SVGA_QUERY_NUM_GENERATE_MIPMAP
:
1208 vresult
->u64
= svga
->hud
.num_generate_mipmap
;
1210 case SVGA_QUERY_NUM_FAILED_ALLOCATIONS
:
1211 vresult
->u64
= svgascreen
->hud
.num_failed_allocations
;
1213 case SVGA_QUERY_NUM_COMMANDS_PER_DRAW
:
1214 vresult
->f
= (float) svga
->swc
->num_commands
1215 / (float) svga
->swc
->num_draw_commands
;
1217 case SVGA_QUERY_SHADER_MEM_USED
:
1218 vresult
->u64
= svga
->hud
.shader_mem_used
;
1221 assert(!"unexpected query type in svga_get_query_result");
1224 SVGA_DBG(DEBUG_QUERY
, "%s result %d\n", __FUNCTION__
, *((uint64_t *)vresult
));
1230 svga_render_condition(struct pipe_context
*pipe
, struct pipe_query
*q
,
1231 bool condition
, enum pipe_render_cond_flag mode
)
1233 struct svga_context
*svga
= svga_context(pipe
);
1234 struct svga_winsys_screen
*sws
= svga_screen(svga
->pipe
.screen
)->sws
;
1235 struct svga_query
*sq
= svga_query(q
);
1236 SVGA3dQueryId queryId
;
1238 SVGA_DBG(DEBUG_QUERY
, "%s\n", __FUNCTION__
);
1240 assert(svga_have_vgpu10(svga
));
1242 queryId
= SVGA3D_INVALID_ID
;
1245 assert(sq
->svga_type
== SVGA3D_QUERYTYPE_OCCLUSION
||
1246 sq
->svga_type
== SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE
);
1248 if (sq
->svga_type
== SVGA3D_QUERYTYPE_OCCLUSION
) {
1249 assert(sq
->predicate
);
1251 * For conditional rendering, make sure to use the associated
1254 sq
= svga_query(sq
->predicate
);
1258 if ((mode
== PIPE_RENDER_COND_WAIT
||
1259 mode
== PIPE_RENDER_COND_BY_REGION_WAIT
) && sq
->fence
) {
1260 sws
->fence_finish(sws
, sq
->fence
, PIPE_TIMEOUT_INFINITE
,
1261 SVGA_FENCE_FLAG_QUERY
);
1265 * if the kernel module doesn't support the predication command,
1266 * we'll just render unconditionally.
1267 * This is probably acceptable for the typical case of occlusion culling.
1269 if (sws
->have_set_predication_cmd
) {
1270 SVGA_RETRY(svga
, SVGA3D_vgpu10_SetPredication(svga
->swc
, queryId
,
1271 (uint32
) condition
));
1272 svga
->pred
.query_id
= queryId
;
1273 svga
->pred
.cond
= condition
;
1276 svga
->render_condition
= (sq
!= NULL
);
1281 * This function is a workaround because we lack the ability to query
1282 * renderer's time synchornously.
1285 svga_get_timestamp(struct pipe_context
*pipe
)
1287 struct pipe_query
*q
= svga_create_query(pipe
, PIPE_QUERY_TIMESTAMP
, 0);
1288 union pipe_query_result result
;
1290 svga_begin_query(pipe
, q
);
1291 svga_end_query(pipe
,q
);
1292 svga_get_query_result(pipe
, q
, TRUE
, &result
);
1293 svga_destroy_query(pipe
, q
);
1300 svga_set_active_query_state(struct pipe_context
*pipe
, bool enable
)
1306 * \brief Toggle conditional rendering if already enabled
1308 * \param svga[in] The svga context
1309 * \param render_condition_enabled[in] Whether to ignore requests to turn
1310 * conditional rendering off
1311 * \param on[in] Whether to turn conditional rendering on or off
1314 svga_toggle_render_condition(struct svga_context
*svga
,
1315 boolean render_condition_enabled
,
1318 SVGA3dQueryId query_id
;
1320 if (render_condition_enabled
||
1321 svga
->pred
.query_id
== SVGA3D_INVALID_ID
) {
1326 * If we get here, it means that the system supports
1327 * conditional rendering since svga->pred.query_id has already been
1328 * modified for this context and thus support has already been
1331 query_id
= on
? svga
->pred
.query_id
: SVGA3D_INVALID_ID
;
1333 SVGA_RETRY(svga
, SVGA3D_vgpu10_SetPredication(svga
->swc
, query_id
,
1334 (uint32
) svga
->pred
.cond
));
1339 svga_init_query_functions(struct svga_context
*svga
)
1341 svga
->pipe
.create_query
= svga_create_query
;
1342 svga
->pipe
.destroy_query
= svga_destroy_query
;
1343 svga
->pipe
.begin_query
= svga_begin_query
;
1344 svga
->pipe
.end_query
= svga_end_query
;
1345 svga
->pipe
.get_query_result
= svga_get_query_result
;
1346 svga
->pipe
.set_active_query_state
= svga_set_active_query_state
;
1347 svga
->pipe
.render_condition
= svga_render_condition
;
1348 svga
->pipe
.get_timestamp
= svga_get_timestamp
;