1 /**************************************************************************
3 * Copyright 2007 VMware, Inc.
4 * Copyright 2010 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
/*
 * Authors:
 *    Keith Whitwell, Qicheng Christopher Li, Brian Paul
 */
33 #include "draw/draw_context.h"
34 #include "pipe/p_defines.h"
35 #include "util/u_memory.h"
36 #include "util/os_time.h"
37 #include "lp_context.h"
41 #include "lp_screen.h"
/**
 * Cast wrapper: convert an opaque gallium pipe_query handle into
 * llvmpipe's private query structure.
 */
static struct llvmpipe_query *llvmpipe_query( struct pipe_query *p )
{
   return (struct llvmpipe_query *)p;
}
51 static struct pipe_query
*
52 llvmpipe_create_query(struct pipe_context
*pipe
,
56 struct llvmpipe_query
*pq
;
58 assert(type
< PIPE_QUERY_TYPES
);
60 pq
= CALLOC_STRUCT( llvmpipe_query
);
67 return (struct pipe_query
*) pq
;
72 llvmpipe_destroy_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
74 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
76 /* Ideally we would refcount queries & not get destroyed until the
77 * last scene had finished with us.
80 if (!lp_fence_issued(pq
->fence
))
81 llvmpipe_flush(pipe
, NULL
, __FUNCTION__
);
83 if (!lp_fence_signalled(pq
->fence
))
84 lp_fence_wait(pq
->fence
);
86 lp_fence_reference(&pq
->fence
, NULL
);
94 llvmpipe_get_query_result(struct pipe_context
*pipe
,
97 union pipe_query_result
*vresult
)
99 struct llvmpipe_screen
*screen
= llvmpipe_screen(pipe
->screen
);
100 unsigned num_threads
= MAX2(1, screen
->num_threads
);
101 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
102 uint64_t *result
= (uint64_t *)vresult
;
106 /* only have a fence if there was a scene */
107 if (!lp_fence_signalled(pq
->fence
)) {
108 if (!lp_fence_issued(pq
->fence
))
109 llvmpipe_flush(pipe
, NULL
, __FUNCTION__
);
114 lp_fence_wait(pq
->fence
);
118 /* Sum the results from each of the threads:
123 case PIPE_QUERY_OCCLUSION_COUNTER
:
124 for (i
= 0; i
< num_threads
; i
++) {
125 *result
+= pq
->end
[i
];
128 case PIPE_QUERY_OCCLUSION_PREDICATE
:
129 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
130 for (i
= 0; i
< num_threads
; i
++) {
131 /* safer (still not guaranteed) when there's an overflow */
132 vresult
->b
= vresult
->b
|| pq
->end
[i
];
135 case PIPE_QUERY_TIMESTAMP
:
136 for (i
= 0; i
< num_threads
; i
++) {
137 if (pq
->end
[i
] > *result
) {
138 *result
= pq
->end
[i
];
142 case PIPE_QUERY_TIMESTAMP_DISJOINT
: {
143 struct pipe_query_data_timestamp_disjoint
*td
=
144 (struct pipe_query_data_timestamp_disjoint
*)vresult
;
145 /* os_get_time_nano return nanoseconds */
146 td
->frequency
= UINT64_C(1000000000);
147 td
->disjoint
= false;
150 case PIPE_QUERY_GPU_FINISHED
:
153 case PIPE_QUERY_PRIMITIVES_GENERATED
:
154 *result
= pq
->num_primitives_generated
;
156 case PIPE_QUERY_PRIMITIVES_EMITTED
:
157 *result
= pq
->num_primitives_written
;
159 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
160 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
161 vresult
->b
= pq
->num_primitives_generated
> pq
->num_primitives_written
;
163 case PIPE_QUERY_SO_STATISTICS
: {
164 struct pipe_query_data_so_statistics
*stats
=
165 (struct pipe_query_data_so_statistics
*)vresult
;
166 stats
->num_primitives_written
= pq
->num_primitives_written
;
167 stats
->primitives_storage_needed
= pq
->num_primitives_generated
;
170 case PIPE_QUERY_PIPELINE_STATISTICS
: {
171 struct pipe_query_data_pipeline_statistics
*stats
=
172 (struct pipe_query_data_pipeline_statistics
*)vresult
;
173 /* only ps_invocations come from binned query */
174 for (i
= 0; i
< num_threads
; i
++) {
175 pq
->stats
.ps_invocations
+= pq
->end
[i
];
177 pq
->stats
.ps_invocations
*= LP_RASTER_BLOCK_SIZE
* LP_RASTER_BLOCK_SIZE
;
190 llvmpipe_get_query_result_resource(struct pipe_context
*pipe
,
191 struct pipe_query
*q
,
193 enum pipe_query_value_type result_type
,
195 struct pipe_resource
*resource
,
198 struct llvmpipe_screen
*screen
= llvmpipe_screen(pipe
->screen
);
199 unsigned num_threads
= MAX2(1, screen
->num_threads
);
200 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
201 struct llvmpipe_resource
*lpr
= llvmpipe_resource(resource
);
202 bool unflushed
= false;
203 bool unsignalled
= false;
205 /* only have a fence if there was a scene */
206 if (!lp_fence_signalled(pq
->fence
)) {
208 if (!lp_fence_issued(pq
->fence
))
224 llvmpipe_flush(pipe
, NULL
, __FUNCTION__
);
229 lp_fence_wait(pq
->fence
);
233 case PIPE_QUERY_OCCLUSION_COUNTER
:
234 for (i
= 0; i
< num_threads
; i
++) {
238 case PIPE_QUERY_OCCLUSION_PREDICATE
:
239 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
240 for (i
= 0; i
< num_threads
; i
++) {
241 /* safer (still not guaranteed) when there's an overflow */
242 value
= value
|| pq
->end
[i
];
245 case PIPE_QUERY_PRIMITIVES_GENERATED
:
246 value
= pq
->num_primitives_generated
;
248 case PIPE_QUERY_PRIMITIVES_EMITTED
:
249 value
= pq
->num_primitives_written
;
251 case PIPE_QUERY_TIMESTAMP
:
252 for (i
= 0; i
< num_threads
; i
++) {
253 if (pq
->end
[i
] > value
) {
258 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
259 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
260 value
= !!(pq
->num_primitives_generated
> pq
->num_primitives_written
);
262 case PIPE_QUERY_PIPELINE_STATISTICS
:
263 switch ((enum pipe_statistics_query_index
)index
) {
264 case PIPE_STAT_QUERY_IA_VERTICES
:
265 value
= pq
->stats
.ia_vertices
;
267 case PIPE_STAT_QUERY_IA_PRIMITIVES
:
268 value
= pq
->stats
.ia_primitives
;
270 case PIPE_STAT_QUERY_VS_INVOCATIONS
:
271 value
= pq
->stats
.vs_invocations
;
273 case PIPE_STAT_QUERY_GS_INVOCATIONS
:
274 value
= pq
->stats
.gs_invocations
;
276 case PIPE_STAT_QUERY_GS_PRIMITIVES
:
277 value
= pq
->stats
.gs_primitives
;
279 case PIPE_STAT_QUERY_C_INVOCATIONS
:
280 value
= pq
->stats
.c_invocations
;
282 case PIPE_STAT_QUERY_C_PRIMITIVES
:
283 value
= pq
->stats
.c_primitives
;
285 case PIPE_STAT_QUERY_PS_INVOCATIONS
:
287 for (i
= 0; i
< num_threads
; i
++) {
290 value
*= LP_RASTER_BLOCK_SIZE
* LP_RASTER_BLOCK_SIZE
;
292 case PIPE_STAT_QUERY_HS_INVOCATIONS
:
293 value
= pq
->stats
.hs_invocations
;
295 case PIPE_STAT_QUERY_DS_INVOCATIONS
:
296 value
= pq
->stats
.ds_invocations
;
298 case PIPE_STAT_QUERY_CS_INVOCATIONS
:
299 value
= pq
->stats
.cs_invocations
;
304 fprintf(stderr
, "Unknown query type %d\n", pq
->type
);
309 void *dst
= (uint8_t *)lpr
->data
+ offset
;
310 switch (result_type
) {
311 case PIPE_QUERY_TYPE_I32
: {
312 int32_t *iptr
= (int32_t *)dst
;
313 if (value
> 0x7fffffff)
316 *iptr
= (int32_t)value
;
319 case PIPE_QUERY_TYPE_U32
: {
320 uint32_t *uptr
= (uint32_t *)dst
;
321 if (value
> 0xffffffff)
324 *uptr
= (uint32_t)value
;
327 case PIPE_QUERY_TYPE_I64
: {
328 int64_t *iptr
= (int64_t *)dst
;
329 *iptr
= (int64_t)value
;
332 case PIPE_QUERY_TYPE_U64
: {
333 uint64_t *uptr
= (uint64_t *)dst
;
334 *uptr
= (uint64_t)value
;
341 llvmpipe_begin_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
343 struct llvmpipe_context
*llvmpipe
= llvmpipe_context( pipe
);
344 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
346 /* Check if the query is already in the scene. If so, we need to
347 * flush the scene now. Real apps shouldn't re-use a query in a
348 * frame of rendering.
350 if (pq
->fence
&& !lp_fence_issued(pq
->fence
)) {
351 llvmpipe_finish(pipe
, __FUNCTION__
);
355 memset(pq
->start
, 0, sizeof(pq
->start
));
356 memset(pq
->end
, 0, sizeof(pq
->end
));
357 lp_setup_begin_query(llvmpipe
->setup
, pq
);
360 case PIPE_QUERY_PRIMITIVES_EMITTED
:
361 pq
->num_primitives_written
= llvmpipe
->so_stats
[pq
->index
].num_primitives_written
;
363 case PIPE_QUERY_PRIMITIVES_GENERATED
:
364 pq
->num_primitives_generated
= llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
;
365 llvmpipe
->active_primgen_queries
++;
367 case PIPE_QUERY_SO_STATISTICS
:
368 pq
->num_primitives_written
= llvmpipe
->so_stats
[pq
->index
].num_primitives_written
;
369 pq
->num_primitives_generated
= llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
;
371 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
372 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
373 pq
->num_primitives_written
= llvmpipe
->so_stats
[pq
->index
].num_primitives_written
;
374 pq
->num_primitives_generated
= llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
;
376 case PIPE_QUERY_PIPELINE_STATISTICS
:
377 /* reset our cache */
378 if (llvmpipe
->active_statistics_queries
== 0) {
379 memset(&llvmpipe
->pipeline_statistics
, 0,
380 sizeof(llvmpipe
->pipeline_statistics
));
382 memcpy(&pq
->stats
, &llvmpipe
->pipeline_statistics
, sizeof(pq
->stats
));
383 llvmpipe
->active_statistics_queries
++;
385 case PIPE_QUERY_OCCLUSION_COUNTER
:
386 case PIPE_QUERY_OCCLUSION_PREDICATE
:
387 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
388 llvmpipe
->active_occlusion_queries
++;
389 llvmpipe
->dirty
|= LP_NEW_OCCLUSION_QUERY
;
399 llvmpipe_end_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
401 struct llvmpipe_context
*llvmpipe
= llvmpipe_context( pipe
);
402 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
404 lp_setup_end_query(llvmpipe
->setup
, pq
);
408 case PIPE_QUERY_PRIMITIVES_EMITTED
:
409 pq
->num_primitives_written
=
410 llvmpipe
->so_stats
[pq
->index
].num_primitives_written
- pq
->num_primitives_written
;
412 case PIPE_QUERY_PRIMITIVES_GENERATED
:
413 assert(llvmpipe
->active_primgen_queries
);
414 llvmpipe
->active_primgen_queries
--;
415 pq
->num_primitives_generated
=
416 llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
- pq
->num_primitives_generated
;
418 case PIPE_QUERY_SO_STATISTICS
:
419 pq
->num_primitives_written
=
420 llvmpipe
->so_stats
[pq
->index
].num_primitives_written
- pq
->num_primitives_written
;
421 pq
->num_primitives_generated
=
422 llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
- pq
->num_primitives_generated
;
424 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
425 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
426 pq
->num_primitives_written
=
427 llvmpipe
->so_stats
[pq
->index
].num_primitives_written
- pq
->num_primitives_written
;
428 pq
->num_primitives_generated
=
429 llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
- pq
->num_primitives_generated
;
431 case PIPE_QUERY_PIPELINE_STATISTICS
:
432 pq
->stats
.ia_vertices
=
433 llvmpipe
->pipeline_statistics
.ia_vertices
- pq
->stats
.ia_vertices
;
434 pq
->stats
.ia_primitives
=
435 llvmpipe
->pipeline_statistics
.ia_primitives
- pq
->stats
.ia_primitives
;
436 pq
->stats
.vs_invocations
=
437 llvmpipe
->pipeline_statistics
.vs_invocations
- pq
->stats
.vs_invocations
;
438 pq
->stats
.gs_invocations
=
439 llvmpipe
->pipeline_statistics
.gs_invocations
- pq
->stats
.gs_invocations
;
440 pq
->stats
.gs_primitives
=
441 llvmpipe
->pipeline_statistics
.gs_primitives
- pq
->stats
.gs_primitives
;
442 pq
->stats
.c_invocations
=
443 llvmpipe
->pipeline_statistics
.c_invocations
- pq
->stats
.c_invocations
;
444 pq
->stats
.c_primitives
=
445 llvmpipe
->pipeline_statistics
.c_primitives
- pq
->stats
.c_primitives
;
446 pq
->stats
.ps_invocations
=
447 llvmpipe
->pipeline_statistics
.ps_invocations
- pq
->stats
.ps_invocations
;
448 pq
->stats
.cs_invocations
=
449 llvmpipe
->pipeline_statistics
.cs_invocations
- pq
->stats
.cs_invocations
;
450 llvmpipe
->active_statistics_queries
--;
452 case PIPE_QUERY_OCCLUSION_COUNTER
:
453 case PIPE_QUERY_OCCLUSION_PREDICATE
:
454 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
455 assert(llvmpipe
->active_occlusion_queries
);
456 llvmpipe
->active_occlusion_queries
--;
457 llvmpipe
->dirty
|= LP_NEW_OCCLUSION_QUERY
;
467 llvmpipe_check_render_cond(struct llvmpipe_context
*lp
)
469 struct pipe_context
*pipe
= &lp
->pipe
;
473 if (!lp
->render_cond_query
)
474 return TRUE
; /* no query predicate, draw normally */
476 wait
= (lp
->render_cond_mode
== PIPE_RENDER_COND_WAIT
||
477 lp
->render_cond_mode
== PIPE_RENDER_COND_BY_REGION_WAIT
);
479 b
= pipe
->get_query_result(pipe
, lp
->render_cond_query
, wait
, (void*)&result
);
481 return ((!result
) == lp
->render_cond_cond
);
487 llvmpipe_set_active_query_state(struct pipe_context
*pipe
, bool enable
)
489 struct llvmpipe_context
*llvmpipe
= llvmpipe_context(pipe
);
491 llvmpipe
->queries_disabled
= !enable
;
492 /* for OQs we need to regenerate the fragment shader */
493 llvmpipe
->dirty
|= LP_NEW_OCCLUSION_QUERY
;
496 void llvmpipe_init_query_funcs(struct llvmpipe_context
*llvmpipe
)
498 llvmpipe
->pipe
.create_query
= llvmpipe_create_query
;
499 llvmpipe
->pipe
.destroy_query
= llvmpipe_destroy_query
;
500 llvmpipe
->pipe
.begin_query
= llvmpipe_begin_query
;
501 llvmpipe
->pipe
.end_query
= llvmpipe_end_query
;
502 llvmpipe
->pipe
.get_query_result
= llvmpipe_get_query_result
;
503 llvmpipe
->pipe
.get_query_result_resource
= llvmpipe_get_query_result_resource
;
504 llvmpipe
->pipe
.set_active_query_state
= llvmpipe_set_active_query_state
;