/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/** @file brw_queryobj.c
 *
 * Support for query objects (GL_ARB_occlusion_query, GL_ARB_timer_query,
 * GL_EXT_transform_feedback, and friends).
 *
 * The hardware provides a PIPE_CONTROL command that can report the number of
 * fragments that passed the depth test, or the hardware timer.  They are
 * appropriately synced with the stage of the pipeline for our extensions'
 * needs.
 */
#include "main/imports.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_reg.h"

/**
 * Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
 */
static void
write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
{
   if (intel->gen >= 6) {
      /* Emit workaround flushes: */
      if (intel->gen == 6) {
         /* The timestamp write below is a non-zero post-sync op, which on
          * Gen6 necessitates a CS stall.  CS stalls need stall at scoreboard
          * set.  See the comments for intel_emit_post_sync_nonzero_flush().
          */
         BEGIN_BATCH(4);
         OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
         OUT_BATCH(PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD);
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
      OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP);
      OUT_RELOC(query_bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                idx * sizeof(uint64_t));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
                PIPE_CONTROL_WRITE_TIMESTAMP);
      OUT_RELOC(query_bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                idx * sizeof(uint64_t));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

/**
 * Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
 */
static void
write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
{
   if (intel->gen >= 6) {
      /* Emit Sandybridge workaround flush: */
      if (intel->gen == 6)
         intel_emit_post_sync_nonzero_flush(intel);

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
      OUT_BATCH(PIPE_CONTROL_DEPTH_STALL |
                PIPE_CONTROL_WRITE_DEPTH_COUNT);
      OUT_RELOC(query_bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                (idx * sizeof(uint64_t)));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
                PIPE_CONTROL_DEPTH_STALL |
                PIPE_CONTROL_WRITE_DEPTH_COUNT);
      /* This object could be mapped cacheable, but we don't have an exposed
       * mechanism to support that.  Since it's going uncached, tell GEM that
       * we're writing to it.  The usual clflush should be all that's required
       * to pick up the results.
       */
      OUT_RELOC(query_bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                (idx * sizeof(uint64_t)));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}
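
/*
 * Both helpers above write a single 64-bit value into the query BO at byte
 * offset idx * sizeof(uint64_t).  Which slot a given idx refers to is up to
 * the callers: brw_begin_query()/brw_end_query() and brw_query_counter() use
 * slots 0 and 1 for timestamps, while brw_emit_query_begin()/_end() use slots
 * 2*i and 2*i + 1 for the begin/end depth counts of batchbuffer i.
 */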

/**
 * Wait on the query object's BO and calculate the final result.
 */
static void
brw_queryobj_get_results(struct gl_context *ctx,
                         struct brw_query_object *query)
{
   struct intel_context *intel = intel_context(ctx);

   int i;
   uint64_t *results;

   if (query->bo == NULL)
      return;

   /* If the application has requested the query result, but this batch is
    * still contributing to it, flush it now so the results will be present
    * when mapped.
    */
   if (drm_intel_bo_references(intel->batch.bo, query->bo))
      intel_batchbuffer_flush(intel);

   if (unlikely(intel->perf_debug)) {
      if (drm_intel_bo_busy(query->bo)) {
         perf_debug("Stalling on the GPU waiting for a query object.\n");
      }
   }

   drm_intel_bo_map(query->bo, false);
   results = query->bo->virtual;
   switch (query->Base.Target) {
   case GL_TIME_ELAPSED_EXT:
      /* The query BO contains the starting and ending timestamps.
       * Subtract the two and convert to nanoseconds.
       */
      if (intel->gen >= 6)
         query->Base.Result += 80 * (results[1] - results[0]);
      else
         query->Base.Result += 1000 * ((results[1] >> 32) - (results[0] >> 32));
      break;

   case GL_TIMESTAMP:
      /* The query BO contains a single timestamp value in results[0]. */
      if (intel->gen >= 6) {
         /* Our timer is a clock that increments every 80ns (regardless of
          * other clock scaling in the system).  The timestamp register we can
          * read for glGetTimestamp() masks out the top 32 bits, so we do that
          * here too to let the two counters be compared against each other.
          *
          * If we just multiplied that 32 bits of data by 80, it would roll
          * over at a non-power-of-two, so an application couldn't use
          * GL_QUERY_COUNTER_BITS to handle rollover correctly.  Instead, we
          * report 36 bits and truncate at that (rolling over 5 times as often
          * as the HW counter), and when the 32-bit counter rolls over, it
          * happens to also be at a rollover in the reported value.
          *
          * The low 32 bits rolls over in ~343 seconds.  Our 36-bit result
          * rolls over every ~69 seconds.
          */
         query->Base.Result = 80 * (results[0] & 0xffffffff);
         query->Base.Result &= (1ull << 36) - 1;
      } else {
         query->Base.Result = 1000 * (results[0] >> 32);
      }
      break;
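
   /* For reference, the arithmetic behind the rollover figures above: the
    * 32-bit hardware counter wraps every 2^32 * 80ns ~= 343.6 seconds, while
    * the reported 36-bit nanosecond value wraps every 2^36 ns ~= 68.7 seconds.
    * Since 80 * 2^32 == 5 * 2^36, the reported value wraps exactly five times
    * per hardware wrap, which keeps the two counters comparable.
    */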

   case GL_SAMPLES_PASSED_ARB:
      /* Loop over pairs of values from the BO, which are the PS_DEPTH_COUNT
       * value at the start and end of the batchbuffer.  Subtract them to
       * get the number of fragments which passed the depth test in each
       * individual batch, and add those differences up to get the number
       * of fragments for the entire query.
       *
       * Note that query->Base.Result may already be non-zero.  We may have
       * run out of space in the query's BO and allocated a new one.  If so,
       * this function was already called to accumulate the results so far.
       */
      for (i = 0; i < query->last_index; i++) {
         query->Base.Result += results[i * 2 + 1] - results[i * 2];
      }
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
      /* If the starting and ending PS_DEPTH_COUNT from any of the batches
       * differ, then some fragments passed the depth test.
       */
      for (i = 0; i < query->last_index; i++) {
         if (results[i * 2 + 1] != results[i * 2]) {
            query->Base.Result = GL_TRUE;
            break;
         }
      }
      break;

   case GL_PRIMITIVES_GENERATED:
   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      /* We don't actually query the hardware for this value, so query->bo
       * should always be NULL and execution should never reach here.
       */
      assert(!"Unreachable");
      break;

   default:
      assert(!"Unrecognized query target in brw_queryobj_get_results()");
      break;
   }

   drm_intel_bo_unmap(query->bo);

   /* Now that we've processed the data stored in the query's buffer object,
    * we can release it.
    */
   drm_intel_bo_unreference(query->bo);
   query->bo = NULL;
}

/**
 * The NewQueryObject() driver hook.
 *
 * Allocates and initializes a new query object.
 */
static struct gl_query_object *
brw_new_query_object(struct gl_context *ctx, GLuint id)
{
   struct brw_query_object *query;

   query = calloc(1, sizeof(struct brw_query_object));

   query->Base.Id = id;
   query->Base.Result = 0;
   query->Base.Active = false;
   query->Base.Ready = true;

   return &query->Base;
}

/**
 * The DeleteQuery() driver hook.
 */
static void
brw_delete_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_query_object *query = (struct brw_query_object *)q;

   drm_intel_bo_unreference(query->bo);
   free(query);
}

/**
 * Driver hook for glBeginQuery().
 *
 * Initializes driver structures and emits any GPU commands required to begin
 * recording data for the query.
 */
static void
brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED_EXT:
      /* For timestamp queries, we record the starting time right away so that
       * we measure the full time between BeginQuery and EndQuery.  There's
       * some debate about whether this is the right thing to do.  Our decision
       * is based on the following text from the ARB_timer_query extension:
       *
       *  "(5) Should the extension measure total time elapsed between the full
       *       completion of the BeginQuery and EndQuery commands, or just time
       *       spent in the graphics library?
       *
       *   RESOLVED:  This extension will measure the total time elapsed
       *   between the full completion of these commands.  Future extensions
       *   may implement a query to determine time elapsed at different stages
       *   of the graphics pipeline."
       *
       * We write a starting timestamp now (at index 0).  At EndQuery() time,
       * we'll write a second timestamp (at index 1), and subtract the two to
       * obtain the time elapsed.  Notably, this includes time elapsed while
       * the system was doing other work, such as running other applications.
       */
      drm_intel_bo_unreference(query->bo);
      query->bo = drm_intel_bo_alloc(intel->bufmgr, "timer query", 4096, 4096);
      write_timestamp(intel, query->bo, 0);
      break;
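
   /* For reference, the GL-side usage this path serves looks roughly like
    * the following (hypothetical application code, not part of this driver):
    *
    *    glBeginQuery(GL_TIME_ELAPSED, q);              // start: writes slot 0
    *    ...draw...
    *    glEndQuery(GL_TIME_ELAPSED);                   // end: writes slot 1
    *    glGetQueryObjectui64v(q, GL_QUERY_RESULT, &ns);
    *
    * brw_queryobj_get_results() then reports 80 * (results[1] - results[0])
    * nanoseconds on Gen6+.
    */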

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
   case GL_SAMPLES_PASSED_ARB:
      /* For occlusion queries, we delay taking an initial sample until the
       * first drawing occurs in this batch.  See the reasoning in the comments
       * for brw_emit_query_begin() below.
       *
       * Since we're starting a new query, we need to be sure to throw away
       * any previous occlusion query results.
       */
      drm_intel_bo_unreference(query->bo);
      query->bo = NULL;
      query->last_index = -1;

      brw->query.obj = query;

      /* Depth statistics on Gen4 require strange workarounds, so we try to
       * avoid them when necessary.  They're required for occlusion queries,
       * so turn them on now.
       */
      intel->stats_wm++;
      brw->state.dirty.brw |= BRW_NEW_STATS_WM;
      break;

   case GL_PRIMITIVES_GENERATED:
      /* We don't actually query the hardware for this value; we keep track of
       * it in a software counter.  So just reset the counter.
       */
      brw->sol.primitives_generated = 0;
      brw->sol.counting_primitives_generated = true;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      /* We don't actually query the hardware for this value; we keep track of
       * it in a software counter.  So just reset the counter.
       */
      brw->sol.primitives_written = 0;
      brw->sol.counting_primitives_written = true;
      break;

   default:
      assert(!"Unrecognized query target in brw_begin_query()");
      break;
   }
}

/**
 * Driver hook for glEndQuery().
 *
 * Emits GPU commands to record a final query value, ending any data capturing.
 * However, the final result isn't necessarily available until the GPU processes
 * those commands.  brw_queryobj_get_results() processes the captured data to
 * produce the final result.
 */
static void
brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED_EXT:
      /* Write the final timestamp. */
      write_timestamp(intel, query->bo, 1);
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
   case GL_SAMPLES_PASSED_ARB:
      /* No query->bo means that EndQuery was called after BeginQuery with no
       * intervening drawing.  Rather than doing nothing at all here in this
       * case, we emit the query_begin and query_end state to the hardware.
       * This guarantees that waiting on the result of this empty query will
       * eventually cause all previously issued queries of the same type to
       * complete, as required by the specification:
       *
       *    "It must always be true that if any query object returns
       *     a result available of TRUE, all queries of the same type
       *     issued prior to that query must also return TRUE."
       *     [OpenGL 4.3 (Core Profile) Section 4.2.1]
       */
      if (!query->bo)
         brw_emit_query_begin(brw);

      brw_emit_query_end(brw);

      brw->query.obj = NULL;

      intel->stats_wm--;
      brw->state.dirty.brw |= BRW_NEW_STATS_WM;
      break;

   case GL_PRIMITIVES_GENERATED:
      /* We don't actually query the hardware for this value; we keep track of
       * it in a software counter.  So just read the counter and store it in
       * the query object.
       */
      query->Base.Result = brw->sol.primitives_generated;
      brw->sol.counting_primitives_generated = false;

      /* And set query->bo to NULL so that this query won't try to wait
       * for any rendering to complete.
       */
      query->bo = NULL;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      /* We don't actually query the hardware for this value; we keep track of
       * it in a software counter.  So just read the counter and store it in
       * the query object.
       */
      query->Base.Result = brw->sol.primitives_written;
      brw->sol.counting_primitives_written = false;

      /* And set query->bo to NULL so that this query won't try to wait
       * for any rendering to complete.
       */
      query->bo = NULL;
      break;

   default:
      assert(!"Unrecognized query target in brw_end_query()");
      break;
   }
}

/**
 * The WaitQuery() driver hook.
 *
 * Wait for a query result to become available and return it.  This is the
 * backing for glGetQueryObjectiv() with the GL_QUERY_RESULT pname.
 */
static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_query_object *query = (struct brw_query_object *)q;

   brw_queryobj_get_results(ctx, query);
   query->Base.Ready = true;
}

/**
 * The CheckQuery() driver hook.
 *
 * Checks whether a query result is ready yet.  If not, flushes.
 * This is the backing for glGetQueryObjectiv()'s QUERY_RESULT_AVAILABLE pname.
 */
static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   /* From the GL_ARB_occlusion_query spec:
    *
    *     "Instead of allowing for an infinite loop, performing a
    *      QUERY_RESULT_AVAILABLE_ARB will perform a flush if the result is
    *      not ready yet on the first time it is queried.  This ensures that
    *      the async query will return true in finite time."
    */
   if (query->bo && drm_intel_bo_references(intel->batch.bo, query->bo))
      intel_batchbuffer_flush(intel);

   if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
      brw_queryobj_get_results(ctx, query);
      query->Base.Ready = true;
   }
}

/**
 * Ensure the query's BO has enough space to store a new pair of values.
 *
 * If not, gather the existing BO's results and create a new buffer of the
 * same size.
 */
static void
ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query)
{
   struct intel_context *intel = intel_context(ctx);

   if (!query->bo || query->last_index * 2 + 1 >= 4096 / sizeof(uint64_t)) {

      if (query->bo != NULL) {
         /* The old query BO did not have enough space, so we allocated a new
          * one.  Gather the results so far (adding up the differences) and
          * release the old BO.
          */
         brw_queryobj_get_results(ctx, query);
      }

      query->bo = drm_intel_bo_alloc(intel->bufmgr, "query", 4096, 1);
      query->last_index = 0;
   }
}
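
/*
 * Capacity math for the check above: a 4096-byte BO holds
 * 4096 / sizeof(uint64_t) = 512 slots, i.e. 256 begin/end pairs.  A single
 * query spanning more than 256 batchbuffers therefore takes the path that
 * accumulates the results gathered so far and starts over with a fresh BO.
 */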

/**
 * Record the PS_DEPTH_COUNT value (for occlusion queries) just before
 * primitive drawing.
 *
 * In a pre-hardware context world, the single PS_DEPTH_COUNT register is
 * shared among all applications using the GPU.  However, our query value
 * needs to only include fragments generated by our application/GL context.
 *
 * To accommodate this, we record PS_DEPTH_COUNT at the start and end of
 * each batchbuffer (technically, the first primitive drawn and flush time).
 * Subtracting each pair of values calculates the change in PS_DEPTH_COUNT
 * caused by a batchbuffer.  Since there is no preemption inside batches,
 * this is guaranteed to only measure the effects of our current application.
 *
 * Adding each of these differences (in case drawing is done over many batches)
 * produces the final expected value.
 *
 * In a world with hardware contexts, PS_DEPTH_COUNT is saved and restored
 * as part of the context state, so this is unnecessary.  We could simply
 * read two values and subtract them.  However, it's safe to continue using
 * the old approach.
 */
void
brw_emit_query_begin(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct brw_query_object *query = brw->query.obj;

   /* Skip if we're not doing any queries, or we've already recorded the
    * initial query value for this batchbuffer.
    */
   if (!query || brw->query.begin_emitted)
      return;

   ensure_bo_has_space(ctx, query);

   write_depth_count(intel, query->bo, query->last_index * 2);

   brw->query.begin_emitted = true;
}

/**
 * Called at batchbuffer flush to get an ending PS_DEPTH_COUNT.
 *
 * See the explanation in brw_emit_query_begin().
 */
void
brw_emit_query_end(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_query_object *query = brw->query.obj;

   if (!brw->query.begin_emitted)
      return;

   write_depth_count(intel, query->bo, query->last_index * 2 + 1);

   brw->query.begin_emitted = false;
   query->last_index++;
}
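
/*
 * Putting brw_emit_query_begin()/_end() together: an occlusion query that
 * spans three batchbuffers ends up with the following BO contents:
 *
 *    results[0]/[1]: PS_DEPTH_COUNT at first draw / flush of batch 0
 *    results[2]/[3]: PS_DEPTH_COUNT at first draw / flush of batch 1
 *    results[4]/[5]: PS_DEPTH_COUNT at first draw / flush of batch 2
 *
 * and brw_queryobj_get_results() sums results[2*i + 1] - results[2*i] for
 * i = 0 .. last_index - 1 to produce the final fragment count.
 */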

/**
 * Driver hook for glQueryCounter().
 *
 * This handles GL_TIMESTAMP queries, which perform a pipelined read of the
 * current GPU time.  This is unlike GL_TIME_ELAPSED, which measures the
 * time while the query is active.
 */
static void
brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *) q;

   assert(q->Target == GL_TIMESTAMP);

   drm_intel_bo_unreference(query->bo);
   query->bo = drm_intel_bo_alloc(intel->bufmgr, "timestamp query", 4096, 4096);
   write_timestamp(intel, query->bo, 0);
}

/**
 * Read the TIMESTAMP register immediately (in a non-pipelined fashion).
 *
 * This is used to implement the GetTimestamp() driver hook.
 */
static uint64_t
brw_get_timestamp(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);
   uint64_t result = 0;

   drm_intel_reg_read(intel->bufmgr, TIMESTAMP, &result);

   /* See logic in brw_queryobj_get_results() */
   result = result >> 32;
   result *= 80;
   result &= (1ull << 36) - 1;

   return result;
}
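
/*
 * Note that this applies the same 80ns-per-tick scaling and 36-bit truncation
 * as the GL_TIMESTAMP path in brw_queryobj_get_results(), so values returned
 * from this hook and from glQueryCounter() queries remain directly comparable.
 */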

void brw_init_queryobj_functions(struct dd_function_table *functions)
{
   functions->NewQueryObject = brw_new_query_object;
   functions->DeleteQuery = brw_delete_query;
   functions->BeginQuery = brw_begin_query;
   functions->EndQuery = brw_end_query;
   functions->QueryCounter = brw_query_counter;
   functions->CheckQuery = brw_check_query;
   functions->WaitQuery = brw_wait_query;
   functions->GetTimestamp = brw_get_timestamp;
}