1 /**************************************************************************
3 * Copyright 2007 VMware, Inc.
4 * Copyright 2010 VMware, Inc.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
/* Authors:
 *    Keith Whitwell, Qicheng Christopher Li, Brian Paul
 */
#include "draw/draw_context.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/os_time.h"
#include "lp_context.h"
#include "lp_flush.h"
#include "lp_fence.h"
#include "lp_query.h"
#include "lp_screen.h"
#include "lp_state.h"
#include "lp_rast.h"
/**
 * Downcast an opaque gallium pipe_query handle to llvmpipe's
 * private query struct.  Purely a pointer cast; no validation.
 */
static struct llvmpipe_query *llvmpipe_query( struct pipe_query *p )
{
   return (struct llvmpipe_query *)p;
}
51 static struct pipe_query
*
52 llvmpipe_create_query(struct pipe_context
*pipe
,
56 struct llvmpipe_query
*pq
;
58 assert(type
< PIPE_QUERY_TYPES
);
60 pq
= CALLOC_STRUCT( llvmpipe_query
);
67 return (struct pipe_query
*) pq
;
72 llvmpipe_destroy_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
74 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
76 /* Ideally we would refcount queries & not get destroyed until the
77 * last scene had finished with us.
80 if (!lp_fence_issued(pq
->fence
))
81 llvmpipe_flush(pipe
, NULL
, __FUNCTION__
);
83 if (!lp_fence_signalled(pq
->fence
))
84 lp_fence_wait(pq
->fence
);
86 lp_fence_reference(&pq
->fence
, NULL
);
94 llvmpipe_get_query_result(struct pipe_context
*pipe
,
97 union pipe_query_result
*vresult
)
99 struct llvmpipe_screen
*screen
= llvmpipe_screen(pipe
->screen
);
100 unsigned num_threads
= MAX2(1, screen
->num_threads
);
101 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
102 uint64_t *result
= (uint64_t *)vresult
;
106 /* only have a fence if there was a scene */
107 if (!lp_fence_signalled(pq
->fence
)) {
108 if (!lp_fence_issued(pq
->fence
))
109 llvmpipe_flush(pipe
, NULL
, __FUNCTION__
);
114 lp_fence_wait(pq
->fence
);
118 /* Sum the results from each of the threads:
123 case PIPE_QUERY_OCCLUSION_COUNTER
:
124 for (i
= 0; i
< num_threads
; i
++) {
125 *result
+= pq
->end
[i
];
128 case PIPE_QUERY_OCCLUSION_PREDICATE
:
129 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
130 for (i
= 0; i
< num_threads
; i
++) {
131 /* safer (still not guaranteed) when there's an overflow */
132 vresult
->b
= vresult
->b
|| pq
->end
[i
];
135 case PIPE_QUERY_TIMESTAMP
:
136 for (i
= 0; i
< num_threads
; i
++) {
137 if (pq
->end
[i
] > *result
) {
138 *result
= pq
->end
[i
];
142 case PIPE_QUERY_TIMESTAMP_DISJOINT
: {
143 struct pipe_query_data_timestamp_disjoint
*td
=
144 (struct pipe_query_data_timestamp_disjoint
*)vresult
;
145 /* os_get_time_nano return nanoseconds */
146 td
->frequency
= UINT64_C(1000000000);
147 td
->disjoint
= false;
150 case PIPE_QUERY_GPU_FINISHED
:
153 case PIPE_QUERY_PRIMITIVES_GENERATED
:
154 *result
= pq
->num_primitives_generated
[0];
156 case PIPE_QUERY_PRIMITIVES_EMITTED
:
157 *result
= pq
->num_primitives_written
[0];
159 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
161 for (unsigned s
= 0; s
< PIPE_MAX_VERTEX_STREAMS
; s
++)
162 vresult
->b
|= pq
->num_primitives_generated
[s
] > pq
->num_primitives_written
[s
];
164 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
165 vresult
->b
= pq
->num_primitives_generated
[0] > pq
->num_primitives_written
[0];
167 case PIPE_QUERY_SO_STATISTICS
: {
168 struct pipe_query_data_so_statistics
*stats
=
169 (struct pipe_query_data_so_statistics
*)vresult
;
170 stats
->num_primitives_written
= pq
->num_primitives_written
[0];
171 stats
->primitives_storage_needed
= pq
->num_primitives_generated
[0];
174 case PIPE_QUERY_PIPELINE_STATISTICS
: {
175 struct pipe_query_data_pipeline_statistics
*stats
=
176 (struct pipe_query_data_pipeline_statistics
*)vresult
;
177 /* only ps_invocations come from binned query */
178 for (i
= 0; i
< num_threads
; i
++) {
179 pq
->stats
.ps_invocations
+= pq
->end
[i
];
181 pq
->stats
.ps_invocations
*= LP_RASTER_BLOCK_SIZE
* LP_RASTER_BLOCK_SIZE
;
194 llvmpipe_get_query_result_resource(struct pipe_context
*pipe
,
195 struct pipe_query
*q
,
197 enum pipe_query_value_type result_type
,
199 struct pipe_resource
*resource
,
202 struct llvmpipe_screen
*screen
= llvmpipe_screen(pipe
->screen
);
203 unsigned num_threads
= MAX2(1, screen
->num_threads
);
204 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
205 struct llvmpipe_resource
*lpr
= llvmpipe_resource(resource
);
206 bool unflushed
= false;
207 bool unsignalled
= false;
209 /* only have a fence if there was a scene */
210 if (!lp_fence_signalled(pq
->fence
)) {
212 if (!lp_fence_issued(pq
->fence
))
228 llvmpipe_flush(pipe
, NULL
, __FUNCTION__
);
233 lp_fence_wait(pq
->fence
);
237 case PIPE_QUERY_OCCLUSION_COUNTER
:
238 for (i
= 0; i
< num_threads
; i
++) {
242 case PIPE_QUERY_OCCLUSION_PREDICATE
:
243 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
244 for (i
= 0; i
< num_threads
; i
++) {
245 /* safer (still not guaranteed) when there's an overflow */
246 value
= value
|| pq
->end
[i
];
249 case PIPE_QUERY_PRIMITIVES_GENERATED
:
250 value
= pq
->num_primitives_generated
[0];
252 case PIPE_QUERY_PRIMITIVES_EMITTED
:
253 value
= pq
->num_primitives_written
[0];
255 case PIPE_QUERY_TIMESTAMP
:
256 for (i
= 0; i
< num_threads
; i
++) {
257 if (pq
->end
[i
] > value
) {
262 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
264 for (unsigned s
= 0; s
< PIPE_MAX_VERTEX_STREAMS
; s
++)
265 value
|= !!(pq
->num_primitives_generated
[s
] > pq
->num_primitives_written
[s
]);
267 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
268 value
= !!(pq
->num_primitives_generated
[0] > pq
->num_primitives_written
[0]);
270 case PIPE_QUERY_PIPELINE_STATISTICS
:
271 switch ((enum pipe_statistics_query_index
)index
) {
272 case PIPE_STAT_QUERY_IA_VERTICES
:
273 value
= pq
->stats
.ia_vertices
;
275 case PIPE_STAT_QUERY_IA_PRIMITIVES
:
276 value
= pq
->stats
.ia_primitives
;
278 case PIPE_STAT_QUERY_VS_INVOCATIONS
:
279 value
= pq
->stats
.vs_invocations
;
281 case PIPE_STAT_QUERY_GS_INVOCATIONS
:
282 value
= pq
->stats
.gs_invocations
;
284 case PIPE_STAT_QUERY_GS_PRIMITIVES
:
285 value
= pq
->stats
.gs_primitives
;
287 case PIPE_STAT_QUERY_C_INVOCATIONS
:
288 value
= pq
->stats
.c_invocations
;
290 case PIPE_STAT_QUERY_C_PRIMITIVES
:
291 value
= pq
->stats
.c_primitives
;
293 case PIPE_STAT_QUERY_PS_INVOCATIONS
:
295 for (i
= 0; i
< num_threads
; i
++) {
298 value
*= LP_RASTER_BLOCK_SIZE
* LP_RASTER_BLOCK_SIZE
;
300 case PIPE_STAT_QUERY_HS_INVOCATIONS
:
301 value
= pq
->stats
.hs_invocations
;
303 case PIPE_STAT_QUERY_DS_INVOCATIONS
:
304 value
= pq
->stats
.ds_invocations
;
306 case PIPE_STAT_QUERY_CS_INVOCATIONS
:
307 value
= pq
->stats
.cs_invocations
;
312 fprintf(stderr
, "Unknown query type %d\n", pq
->type
);
317 void *dst
= (uint8_t *)lpr
->data
+ offset
;
318 switch (result_type
) {
319 case PIPE_QUERY_TYPE_I32
: {
320 int32_t *iptr
= (int32_t *)dst
;
321 if (value
> 0x7fffffff)
324 *iptr
= (int32_t)value
;
327 case PIPE_QUERY_TYPE_U32
: {
328 uint32_t *uptr
= (uint32_t *)dst
;
329 if (value
> 0xffffffff)
332 *uptr
= (uint32_t)value
;
335 case PIPE_QUERY_TYPE_I64
: {
336 int64_t *iptr
= (int64_t *)dst
;
337 *iptr
= (int64_t)value
;
340 case PIPE_QUERY_TYPE_U64
: {
341 uint64_t *uptr
= (uint64_t *)dst
;
342 *uptr
= (uint64_t)value
;
349 llvmpipe_begin_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
351 struct llvmpipe_context
*llvmpipe
= llvmpipe_context( pipe
);
352 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
354 /* Check if the query is already in the scene. If so, we need to
355 * flush the scene now. Real apps shouldn't re-use a query in a
356 * frame of rendering.
358 if (pq
->fence
&& !lp_fence_issued(pq
->fence
)) {
359 llvmpipe_finish(pipe
, __FUNCTION__
);
363 memset(pq
->start
, 0, sizeof(pq
->start
));
364 memset(pq
->end
, 0, sizeof(pq
->end
));
365 lp_setup_begin_query(llvmpipe
->setup
, pq
);
368 case PIPE_QUERY_PRIMITIVES_EMITTED
:
369 pq
->num_primitives_written
[0] = llvmpipe
->so_stats
[pq
->index
].num_primitives_written
;
371 case PIPE_QUERY_PRIMITIVES_GENERATED
:
372 pq
->num_primitives_generated
[0] = llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
;
373 llvmpipe
->active_primgen_queries
++;
375 case PIPE_QUERY_SO_STATISTICS
:
376 pq
->num_primitives_written
[0] = llvmpipe
->so_stats
[pq
->index
].num_primitives_written
;
377 pq
->num_primitives_generated
[0] = llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
;
379 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
380 for (unsigned s
= 0; s
< PIPE_MAX_VERTEX_STREAMS
; s
++) {
381 pq
->num_primitives_written
[s
] = llvmpipe
->so_stats
[s
].num_primitives_written
;
382 pq
->num_primitives_generated
[s
] = llvmpipe
->so_stats
[s
].primitives_storage_needed
;
385 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
386 pq
->num_primitives_written
[0] = llvmpipe
->so_stats
[pq
->index
].num_primitives_written
;
387 pq
->num_primitives_generated
[0] = llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
;
389 case PIPE_QUERY_PIPELINE_STATISTICS
:
390 /* reset our cache */
391 if (llvmpipe
->active_statistics_queries
== 0) {
392 memset(&llvmpipe
->pipeline_statistics
, 0,
393 sizeof(llvmpipe
->pipeline_statistics
));
395 memcpy(&pq
->stats
, &llvmpipe
->pipeline_statistics
, sizeof(pq
->stats
));
396 llvmpipe
->active_statistics_queries
++;
398 case PIPE_QUERY_OCCLUSION_COUNTER
:
399 case PIPE_QUERY_OCCLUSION_PREDICATE
:
400 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
401 llvmpipe
->active_occlusion_queries
++;
402 llvmpipe
->dirty
|= LP_NEW_OCCLUSION_QUERY
;
412 llvmpipe_end_query(struct pipe_context
*pipe
, struct pipe_query
*q
)
414 struct llvmpipe_context
*llvmpipe
= llvmpipe_context( pipe
);
415 struct llvmpipe_query
*pq
= llvmpipe_query(q
);
417 lp_setup_end_query(llvmpipe
->setup
, pq
);
421 case PIPE_QUERY_PRIMITIVES_EMITTED
:
422 pq
->num_primitives_written
[0] =
423 llvmpipe
->so_stats
[pq
->index
].num_primitives_written
- pq
->num_primitives_written
[0];
425 case PIPE_QUERY_PRIMITIVES_GENERATED
:
426 assert(llvmpipe
->active_primgen_queries
);
427 llvmpipe
->active_primgen_queries
--;
428 pq
->num_primitives_generated
[0] =
429 llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
- pq
->num_primitives_generated
[0];
431 case PIPE_QUERY_SO_STATISTICS
:
432 pq
->num_primitives_written
[0] =
433 llvmpipe
->so_stats
[pq
->index
].num_primitives_written
- pq
->num_primitives_written
[0];
434 pq
->num_primitives_generated
[0] =
435 llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
- pq
->num_primitives_generated
[0];
437 case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE
:
438 for (unsigned s
= 0; s
< PIPE_MAX_VERTEX_STREAMS
; s
++) {
439 pq
->num_primitives_written
[s
] =
440 llvmpipe
->so_stats
[s
].num_primitives_written
- pq
->num_primitives_written
[s
];
441 pq
->num_primitives_generated
[s
] =
442 llvmpipe
->so_stats
[s
].primitives_storage_needed
- pq
->num_primitives_generated
[s
];
445 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
446 pq
->num_primitives_written
[0] =
447 llvmpipe
->so_stats
[pq
->index
].num_primitives_written
- pq
->num_primitives_written
[0];
448 pq
->num_primitives_generated
[0] =
449 llvmpipe
->so_stats
[pq
->index
].primitives_storage_needed
- pq
->num_primitives_generated
[0];
451 case PIPE_QUERY_PIPELINE_STATISTICS
:
452 pq
->stats
.ia_vertices
=
453 llvmpipe
->pipeline_statistics
.ia_vertices
- pq
->stats
.ia_vertices
;
454 pq
->stats
.ia_primitives
=
455 llvmpipe
->pipeline_statistics
.ia_primitives
- pq
->stats
.ia_primitives
;
456 pq
->stats
.vs_invocations
=
457 llvmpipe
->pipeline_statistics
.vs_invocations
- pq
->stats
.vs_invocations
;
458 pq
->stats
.gs_invocations
=
459 llvmpipe
->pipeline_statistics
.gs_invocations
- pq
->stats
.gs_invocations
;
460 pq
->stats
.gs_primitives
=
461 llvmpipe
->pipeline_statistics
.gs_primitives
- pq
->stats
.gs_primitives
;
462 pq
->stats
.c_invocations
=
463 llvmpipe
->pipeline_statistics
.c_invocations
- pq
->stats
.c_invocations
;
464 pq
->stats
.c_primitives
=
465 llvmpipe
->pipeline_statistics
.c_primitives
- pq
->stats
.c_primitives
;
466 pq
->stats
.ps_invocations
=
467 llvmpipe
->pipeline_statistics
.ps_invocations
- pq
->stats
.ps_invocations
;
468 pq
->stats
.cs_invocations
=
469 llvmpipe
->pipeline_statistics
.cs_invocations
- pq
->stats
.cs_invocations
;
470 pq
->stats
.hs_invocations
=
471 llvmpipe
->pipeline_statistics
.hs_invocations
- pq
->stats
.hs_invocations
;
472 pq
->stats
.ds_invocations
=
473 llvmpipe
->pipeline_statistics
.ds_invocations
- pq
->stats
.ds_invocations
;
474 llvmpipe
->active_statistics_queries
--;
476 case PIPE_QUERY_OCCLUSION_COUNTER
:
477 case PIPE_QUERY_OCCLUSION_PREDICATE
:
478 case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE
:
479 assert(llvmpipe
->active_occlusion_queries
);
480 llvmpipe
->active_occlusion_queries
--;
481 llvmpipe
->dirty
|= LP_NEW_OCCLUSION_QUERY
;
491 llvmpipe_check_render_cond(struct llvmpipe_context
*lp
)
493 struct pipe_context
*pipe
= &lp
->pipe
;
497 if (!lp
->render_cond_query
)
498 return TRUE
; /* no query predicate, draw normally */
500 wait
= (lp
->render_cond_mode
== PIPE_RENDER_COND_WAIT
||
501 lp
->render_cond_mode
== PIPE_RENDER_COND_BY_REGION_WAIT
);
503 b
= pipe
->get_query_result(pipe
, lp
->render_cond_query
, wait
, (void*)&result
);
505 return ((!result
) == lp
->render_cond_cond
);
511 llvmpipe_set_active_query_state(struct pipe_context
*pipe
, bool enable
)
513 struct llvmpipe_context
*llvmpipe
= llvmpipe_context(pipe
);
515 llvmpipe
->queries_disabled
= !enable
;
516 /* for OQs we need to regenerate the fragment shader */
517 llvmpipe
->dirty
|= LP_NEW_OCCLUSION_QUERY
;
520 void llvmpipe_init_query_funcs(struct llvmpipe_context
*llvmpipe
)
522 llvmpipe
->pipe
.create_query
= llvmpipe_create_query
;
523 llvmpipe
->pipe
.destroy_query
= llvmpipe_destroy_query
;
524 llvmpipe
->pipe
.begin_query
= llvmpipe_begin_query
;
525 llvmpipe
->pipe
.end_query
= llvmpipe_end_query
;
526 llvmpipe
->pipe
.get_query_result
= llvmpipe_get_query_result
;
527 llvmpipe
->pipe
.get_query_result_resource
= llvmpipe_get_query_result_resource
;
528 llvmpipe
->pipe
.set_active_query_state
= llvmpipe_set_active_query_state
;