1 /**************************************************************************
3 * Copyright 2007 VMware, Inc.
4 * Copyright 2010 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
/* Authors:
 *    Keith Whitwell, Qicheng Christopher Li, Brian Paul
 */
33 #include "draw/draw_context.h"
34 #include "pipe/p_defines.h"
35 #include "util/u_memory.h"
36 #include "util/os_time.h"
37 #include "lp_context.h"
41 #include "lp_screen.h"
/**
 * Downcast helper: view an opaque gallium pipe_query as the
 * driver-private llvmpipe_query it really is.
 */
static struct llvmpipe_query *
llvmpipe_query(struct pipe_query *p)
{
   return (struct llvmpipe_query *) p;
}
51 static struct pipe_query
*
52 llvmpipe_create_query(struct pipe_context
*pipe
,
56 struct llvmpipe_query
*pq
;
58 assert(type
< PIPE_QUERY_TYPES
);
60 pq
= CALLOC_STRUCT( llvmpipe_query
);
66 return (struct pipe_query
*) pq
;
71 llvmpipe_destroy_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
73 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
75 /* Ideally we would refcount queries & not get destroyed until the
76 * last scene had finished with us.
79 if (!lp_fence_issued(pq
->fence
))
80 llvmpipe_flush(pipe
, NULL
, __FUNCTION__
);
82 if (!lp_fence_signalled(pq
->fence
))
83 lp_fence_wait(pq
->fence
);
85 lp_fence_reference(&pq
->fence
, NULL
);
93 llvmpipe_get_query_result(struct pipe_context
*pipe
,
96 union pipe_query_result
*vresult
)
98 struct llvmpipe_screen
*screen
= llvmpipe_screen(pipe
->screen
);
99 unsigned num_threads
= MAX2(1, screen
->num_threads
);
100 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
101 uint64_t *result
= (uint64_t *)vresult
;
105 /* only have a fence if there was a scene */
106 if (!lp_fence_signalled(pq
->fence
)) {
107 if (!lp_fence_issued(pq
->fence
))
108 llvmpipe_flush(pipe
, NULL
, __FUNCTION__
);
113 lp_fence_wait(pq
->fence
);
117 /* Sum the results from each of the threads:
122 case PIPE_QUERY_OCCLUSION_COUNTER
:
123 for (i
= 0; i
< num_threads
; i
++) {
124 *result
+= pq
->end
[i
];
127 case PIPE_QUERY_OCCLUSION_PREDICATE
:
128 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
129 for (i
= 0; i
< num_threads
; i
++) {
130 /* safer (still not guaranteed) when there's an overflow */
131 vresult
->b
= vresult
->b
|| pq
->end
[i
];
134 case PIPE_QUERY_TIMESTAMP
:
135 for (i
= 0; i
< num_threads
; i
++) {
136 if (pq
->end
[i
] > *result
) {
137 *result
= pq
->end
[i
];
141 case PIPE_QUERY_TIMESTAMP_DISJOINT
: {
142 struct pipe_query_data_timestamp_disjoint
*td
=
143 (struct pipe_query_data_timestamp_disjoint
*)vresult
;
144 /* os_get_time_nano return nanoseconds */
145 td
->frequency
= UINT64_C(1000000000);
146 td
->disjoint
= false;
149 case PIPE_QUERY_GPU_FINISHED
:
152 case PIPE_QUERY_PRIMITIVES_GENERATED
:
153 *result
= pq
->num_primitives_generated
;
155 case PIPE_QUERY_PRIMITIVES_EMITTED
:
156 *result
= pq
->num_primitives_written
;
158 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
159 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
160 vresult
->b
= pq
->num_primitives_generated
> pq
->num_primitives_written
;
162 case PIPE_QUERY_SO_STATISTICS
: {
163 struct pipe_query_data_so_statistics
*stats
=
164 (struct pipe_query_data_so_statistics
*)vresult
;
165 stats
->num_primitives_written
= pq
->num_primitives_written
;
166 stats
->primitives_storage_needed
= pq
->num_primitives_generated
;
169 case PIPE_QUERY_PIPELINE_STATISTICS
: {
170 struct pipe_query_data_pipeline_statistics
*stats
=
171 (struct pipe_query_data_pipeline_statistics
*)vresult
;
172 /* only ps_invocations come from binned query */
173 for (i
= 0; i
< num_threads
; i
++) {
174 pq
->stats
.ps_invocations
+= pq
->end
[i
];
176 pq
->stats
.ps_invocations
*= LP_RASTER_BLOCK_SIZE
* LP_RASTER_BLOCK_SIZE
;
189 llvmpipe_get_query_result_resource(struct pipe_context
*pipe
,
190 struct pipe_query
*q
,
192 enum pipe_query_value_type result_type
,
194 struct pipe_resource
*resource
,
197 struct llvmpipe_screen
*screen
= llvmpipe_screen(pipe
->screen
);
198 unsigned num_threads
= MAX2(1, screen
->num_threads
);
199 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
200 struct llvmpipe_resource
*lpr
= llvmpipe_resource(resource
);
201 bool unflushed
= false;
202 bool unsignalled
= false;
204 /* only have a fence if there was a scene */
205 if (!lp_fence_signalled(pq
->fence
)) {
207 if (!lp_fence_issued(pq
->fence
))
223 llvmpipe_flush(pipe
, NULL
, __FUNCTION__
);
228 lp_fence_wait(pq
->fence
);
232 case PIPE_QUERY_OCCLUSION_COUNTER
:
233 for (i
= 0; i
< num_threads
; i
++) {
237 case PIPE_QUERY_OCCLUSION_PREDICATE
:
238 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
239 for (i
= 0; i
< num_threads
; i
++) {
240 /* safer (still not guaranteed) when there's an overflow */
241 value
= value
|| pq
->end
[i
];
244 case PIPE_QUERY_PRIMITIVES_GENERATED
:
245 value
= pq
->num_primitives_generated
;
247 case PIPE_QUERY_PRIMITIVES_EMITTED
:
248 value
= pq
->num_primitives_written
;
250 case PIPE_QUERY_TIMESTAMP
:
251 for (i
= 0; i
< num_threads
; i
++) {
252 if (pq
->end
[i
] > value
) {
257 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
258 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
259 value
= !!(pq
->num_primitives_generated
> pq
->num_primitives_written
);
261 case PIPE_QUERY_PIPELINE_STATISTICS
:
262 switch ((enum pipe_statistics_query_index
)index
) {
263 case PIPE_STAT_QUERY_IA_VERTICES
:
264 value
= pq
->stats
.ia_vertices
;
266 case PIPE_STAT_QUERY_IA_PRIMITIVES
:
267 value
= pq
->stats
.ia_primitives
;
269 case PIPE_STAT_QUERY_VS_INVOCATIONS
:
270 value
= pq
->stats
.vs_invocations
;
272 case PIPE_STAT_QUERY_GS_INVOCATIONS
:
273 value
= pq
->stats
.gs_invocations
;
275 case PIPE_STAT_QUERY_GS_PRIMITIVES
:
276 value
= pq
->stats
.gs_primitives
;
278 case PIPE_STAT_QUERY_C_INVOCATIONS
:
279 value
= pq
->stats
.c_invocations
;
281 case PIPE_STAT_QUERY_C_PRIMITIVES
:
282 value
= pq
->stats
.c_primitives
;
284 case PIPE_STAT_QUERY_PS_INVOCATIONS
:
286 for (i
= 0; i
< num_threads
; i
++) {
289 value
*= LP_RASTER_BLOCK_SIZE
* LP_RASTER_BLOCK_SIZE
;
291 case PIPE_STAT_QUERY_HS_INVOCATIONS
:
292 value
= pq
->stats
.hs_invocations
;
294 case PIPE_STAT_QUERY_DS_INVOCATIONS
:
295 value
= pq
->stats
.ds_invocations
;
297 case PIPE_STAT_QUERY_CS_INVOCATIONS
:
298 value
= pq
->stats
.cs_invocations
;
303 fprintf(stderr
, "Unknown query type %d\n", pq
->type
);
308 void *dst
= (uint8_t *)lpr
->data
+ offset
;
309 switch (result_type
) {
310 case PIPE_QUERY_TYPE_I32
: {
311 int32_t *iptr
= (int32_t *)dst
;
312 if (value
> 0x7fffffff)
315 *iptr
= (int32_t)value
;
318 case PIPE_QUERY_TYPE_U32
: {
319 uint32_t *uptr
= (uint32_t *)dst
;
320 if (value
> 0xffffffff)
323 *uptr
= (uint32_t)value
;
326 case PIPE_QUERY_TYPE_I64
: {
327 int64_t *iptr
= (int64_t *)dst
;
328 *iptr
= (int64_t)value
;
331 case PIPE_QUERY_TYPE_U64
: {
332 uint64_t *uptr
= (uint64_t *)dst
;
333 *uptr
= (uint64_t)value
;
340 llvmpipe_begin_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
342 struct llvmpipe_context
*llvmpipe
= llvmpipe_context( pipe
);
343 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
345 /* Check if the query is already in the scene. If so, we need to
346 * flush the scene now. Real apps shouldn't re-use a query in a
347 * frame of rendering.
349 if (pq
->fence
&& !lp_fence_issued(pq
->fence
)) {
350 llvmpipe_finish(pipe
, __FUNCTION__
);
354 memset(pq
->start
, 0, sizeof(pq
->start
));
355 memset(pq
->end
, 0, sizeof(pq
->end
));
356 lp_setup_begin_query(llvmpipe
->setup
, pq
);
359 case PIPE_QUERY_PRIMITIVES_EMITTED
:
360 pq
->num_primitives_written
= llvmpipe
->so_stats
.num_primitives_written
;
362 case PIPE_QUERY_PRIMITIVES_GENERATED
:
363 pq
->num_primitives_generated
= llvmpipe
->so_stats
.primitives_storage_needed
;
364 llvmpipe
->active_primgen_queries
++;
366 case PIPE_QUERY_SO_STATISTICS
:
367 pq
->num_primitives_written
= llvmpipe
->so_stats
.num_primitives_written
;
368 pq
->num_primitives_generated
= llvmpipe
->so_stats
.primitives_storage_needed
;
370 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
371 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
372 pq
->num_primitives_written
= llvmpipe
->so_stats
.num_primitives_written
;
373 pq
->num_primitives_generated
= llvmpipe
->so_stats
.primitives_storage_needed
;
375 case PIPE_QUERY_PIPELINE_STATISTICS
:
376 /* reset our cache */
377 if (llvmpipe
->active_statistics_queries
== 0) {
378 memset(&llvmpipe
->pipeline_statistics
, 0,
379 sizeof(llvmpipe
->pipeline_statistics
));
381 memcpy(&pq
->stats
, &llvmpipe
->pipeline_statistics
, sizeof(pq
->stats
));
382 llvmpipe
->active_statistics_queries
++;
384 case PIPE_QUERY_OCCLUSION_COUNTER
:
385 case PIPE_QUERY_OCCLUSION_PREDICATE
:
386 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
387 llvmpipe
->active_occlusion_queries
++;
388 llvmpipe
->dirty
|= LP_NEW_OCCLUSION_QUERY
;
398 llvmpipe_end_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
400 struct llvmpipe_context
*llvmpipe
= llvmpipe_context( pipe
);
401 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
403 lp_setup_end_query(llvmpipe
->setup
, pq
);
407 case PIPE_QUERY_PRIMITIVES_EMITTED
:
408 pq
->num_primitives_written
=
409 llvmpipe
->so_stats
.num_primitives_written
- pq
->num_primitives_written
;
411 case PIPE_QUERY_PRIMITIVES_GENERATED
:
412 assert(llvmpipe
->active_primgen_queries
);
413 llvmpipe
->active_primgen_queries
--;
414 pq
->num_primitives_generated
=
415 llvmpipe
->so_stats
.primitives_storage_needed
- pq
->num_primitives_generated
;
417 case PIPE_QUERY_SO_STATISTICS
:
418 pq
->num_primitives_written
=
419 llvmpipe
->so_stats
.num_primitives_written
- pq
->num_primitives_written
;
420 pq
->num_primitives_generated
=
421 llvmpipe
->so_stats
.primitives_storage_needed
- pq
->num_primitives_generated
;
423 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
424 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
425 pq
->num_primitives_written
=
426 llvmpipe
->so_stats
.num_primitives_written
- pq
->num_primitives_written
;
427 pq
->num_primitives_generated
=
428 llvmpipe
->so_stats
.primitives_storage_needed
- pq
->num_primitives_generated
;
430 case PIPE_QUERY_PIPELINE_STATISTICS
:
431 pq
->stats
.ia_vertices
=
432 llvmpipe
->pipeline_statistics
.ia_vertices
- pq
->stats
.ia_vertices
;
433 pq
->stats
.ia_primitives
=
434 llvmpipe
->pipeline_statistics
.ia_primitives
- pq
->stats
.ia_primitives
;
435 pq
->stats
.vs_invocations
=
436 llvmpipe
->pipeline_statistics
.vs_invocations
- pq
->stats
.vs_invocations
;
437 pq
->stats
.gs_invocations
=
438 llvmpipe
->pipeline_statistics
.gs_invocations
- pq
->stats
.gs_invocations
;
439 pq
->stats
.gs_primitives
=
440 llvmpipe
->pipeline_statistics
.gs_primitives
- pq
->stats
.gs_primitives
;
441 pq
->stats
.c_invocations
=
442 llvmpipe
->pipeline_statistics
.c_invocations
- pq
->stats
.c_invocations
;
443 pq
->stats
.c_primitives
=
444 llvmpipe
->pipeline_statistics
.c_primitives
- pq
->stats
.c_primitives
;
445 pq
->stats
.ps_invocations
=
446 llvmpipe
->pipeline_statistics
.ps_invocations
- pq
->stats
.ps_invocations
;
447 pq
->stats
.cs_invocations
=
448 llvmpipe
->pipeline_statistics
.cs_invocations
- pq
->stats
.cs_invocations
;
449 llvmpipe
->active_statistics_queries
--;
451 case PIPE_QUERY_OCCLUSION_COUNTER
:
452 case PIPE_QUERY_OCCLUSION_PREDICATE
:
453 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
454 assert(llvmpipe
->active_occlusion_queries
);
455 llvmpipe
->active_occlusion_queries
--;
456 llvmpipe
->dirty
|= LP_NEW_OCCLUSION_QUERY
;
466 llvmpipe_check_render_cond(struct llvmpipe_context
*lp
)
468 struct pipe_context
*pipe
= &lp
->pipe
;
472 if (!lp
->render_cond_query
)
473 return TRUE
; /* no query predicate, draw normally */
475 wait
= (lp
->render_cond_mode
== PIPE_RENDER_COND_WAIT
||
476 lp
->render_cond_mode
== PIPE_RENDER_COND_BY_REGION_WAIT
);
478 b
= pipe
->get_query_result(pipe
, lp
->render_cond_query
, wait
, (void*)&result
);
480 return ((!result
) == lp
->render_cond_cond
);
486 llvmpipe_set_active_query_state(struct pipe_context
*pipe
, bool enable
)
488 struct llvmpipe_context
*llvmpipe
= llvmpipe_context(pipe
);
490 llvmpipe
->queries_disabled
= !enable
;
491 /* for OQs we need to regenerate the fragment shader */
492 llvmpipe
->dirty
|= LP_NEW_OCCLUSION_QUERY
;
495 void llvmpipe_init_query_funcs(struct llvmpipe_context
*llvmpipe
)
497 llvmpipe
->pipe
.create_query
= llvmpipe_create_query
;
498 llvmpipe
->pipe
.destroy_query
= llvmpipe_destroy_query
;
499 llvmpipe
->pipe
.begin_query
= llvmpipe_begin_query
;
500 llvmpipe
->pipe
.end_query
= llvmpipe_end_query
;
501 llvmpipe
->pipe
.get_query_result
= llvmpipe_get_query_result
;
502 llvmpipe
->pipe
.get_query_result_resource
= llvmpipe_get_query_result_resource
;
503 llvmpipe
->pipe
.set_active_query_state
= llvmpipe_set_active_query_state
;