i965: Turn if (query->bo) into an assertion.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_queryobj.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
/** @file brw_queryobj.c
 *
 * Support for query objects (GL_ARB_occlusion_query, GL_ARB_timer_query,
 * GL_EXT_transform_feedback, and friends).
 *
 * The hardware provides a PIPE_CONTROL command that can report the number of
 * fragments that passed the depth test, or the hardware timer. They are
 * appropriately synced with the stage of the pipeline for our extensions'
 * needs.
 */
#include "main/imports.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_reg.h"

/**
 * Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
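 *
 * Each slot is one uint64_t at byte offset idx * sizeof(uint64_t) in
 * query_bo; GL_TIME_ELAPSED uses slots 0 (begin) and 1 (end), and
 * GL_TIMESTAMP uses slot 0 alone.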
 */
static void
write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
{
   if (intel->gen >= 6) {
      /* Emit workaround flushes: */
      if (intel->gen == 6) {
         /* The timestamp write below is a non-zero post-sync op, which on
          * Gen6 necessitates a CS stall. CS stalls need stall at scoreboard
          * set. See the comments for intel_emit_post_sync_nonzero_flush().
          */
         BEGIN_BATCH(4);
         OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
         OUT_BATCH(PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD);
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
      OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP);
      OUT_RELOC(query_bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                idx * sizeof(uint64_t));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
                PIPE_CONTROL_WRITE_TIMESTAMP);
      OUT_RELOC(query_bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                idx * sizeof(uint64_t));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

/**
 * Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
 */
static void
write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
{
   if (intel->gen >= 6) {
      /* Emit Sandybridge workaround flush: */
      if (intel->gen == 6)
         intel_emit_post_sync_nonzero_flush(intel);

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
      OUT_BATCH(PIPE_CONTROL_DEPTH_STALL |
                PIPE_CONTROL_WRITE_DEPTH_COUNT);
      OUT_RELOC(query_bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                (idx * sizeof(uint64_t)));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2) |
                PIPE_CONTROL_DEPTH_STALL |
                PIPE_CONTROL_WRITE_DEPTH_COUNT);
      /* This object could be mapped cacheable, but we don't have an exposed
       * mechanism to support that. Since it's going uncached, tell GEM that
       * we're writing to it. The usual clflush should be all that's required
       * to pick up the results.
       */
      OUT_RELOC(query_bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                (idx * sizeof(uint64_t)));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

/**
 * Wait on the query object's BO and calculate the final result.
 */
static void
brw_queryobj_get_results(struct gl_context *ctx,
                         struct brw_query_object *query)
{
   struct intel_context *intel = intel_context(ctx);

   int i;
   uint64_t *results;

   if (query->bo == NULL)
      return;

   /* If the application has requested the query result, but this batch is
    * still contributing to it, flush it now so the results will be present
    * when mapped.
    */
   if (drm_intel_bo_references(intel->batch.bo, query->bo))
      intel_batchbuffer_flush(intel);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(query->bo)) {
         perf_debug("Stalling on the GPU waiting for a query object.\n");
      }
   }

   drm_intel_bo_map(query->bo, false);
   results = query->bo->virtual;
   switch (query->Base.Target) {
   case GL_TIME_ELAPSED_EXT:
      /* The query BO contains the starting and ending timestamps.
       * Subtract the two and convert to nanoseconds.
       */
      if (intel->gen >= 6)
         query->Base.Result += 80 * (results[1] - results[0]);
      else
         query->Base.Result += 1000 * ((results[1] >> 32) - (results[0] >> 32));
      break;

   case GL_TIMESTAMP:
      /* The query BO contains a single timestamp value in results[0]. */
      if (intel->gen >= 6) {
         /* Our timer is a clock that increments every 80ns (regardless of
          * other clock scaling in the system). The timestamp register we can
          * read for the GetTimestamp() hook masks out the top 32 bits, so we
          * do that here too to let the two counters be compared against each
          * other.
          *
          * If we just multiplied that 32 bits of data by 80, it would roll
          * over at a non-power-of-two, so an application couldn't use
          * GL_QUERY_COUNTER_BITS to handle rollover correctly. Instead, we
          * report 36 bits and truncate at that (rolling over 5 times as often
          * as the HW counter), and when the 32-bit counter rolls over, it
          * happens to also be at a rollover in the reported value from near
          * (1<<36) to 0.
          *
          * The low 32 bits roll over in ~343 seconds. Our 36-bit result
          * rolls over every ~69 seconds.
          */
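         /* Checking the arithmetic: 2^32 ticks * 80 ns/tick is
          * 343,597,383,680 ns (~343.6 s), and 2^36 ns is 68,719,476,736 ns
          * (~68.7 s). Since 2^32 * 80 = 5 * 2^36, the hardware counter's
          * wrap coincides exactly with every fifth wrap of the reported
          * 36-bit value.
          */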
         query->Base.Result = 80 * (results[0] & 0xffffffff);
         query->Base.Result &= (1ull << 36) - 1;
      } else {
         query->Base.Result = 1000 * (results[0] >> 32);
      }
      break;

   case GL_SAMPLES_PASSED_ARB:
      /* Loop over pairs of values from the BO, which are the PS_DEPTH_COUNT
       * value at the start and end of the batchbuffer. Subtract them to
       * get the number of fragments which passed the depth test in each
       * individual batch, and add those differences up to get the number
       * of fragments for the entire query.
       *
       * Note that query->Base.Result may already be non-zero. We may have
       * run out of space in the query's BO and allocated a new one. If so,
       * this function was already called to accumulate the results so far.
       */
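      /* Concretely, the BO is laid out in uint64_t slots as:
       *
       *    results[0]  PS_DEPTH_COUNT at the start of batch 0
       *    results[1]  PS_DEPTH_COUNT at the end of batch 0
       *    results[2]  PS_DEPTH_COUNT at the start of batch 1
       *    ...
       *
       * so pair i lives at results[i * 2] and results[i * 2 + 1], and
       * last_index names the final pair.
       */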
      for (i = 0; i <= query->last_index; i++) {
         query->Base.Result += results[i * 2 + 1] - results[i * 2];
      }
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
      /* If the starting and ending PS_DEPTH_COUNT from any of the batches
       * differ, then some fragments passed the depth test.
       */
      for (i = 0; i <= query->last_index; i++) {
         if (results[i * 2 + 1] != results[i * 2]) {
            query->Base.Result = GL_TRUE;
            break;
         }
      }
      break;

   case GL_PRIMITIVES_GENERATED:
   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      /* We don't actually query the hardware for this value, so query->bo
       * should always be NULL and execution should never reach here.
       */
      assert(!"Unreachable");
      break;

   default:
      assert(!"Unrecognized query target in brw_queryobj_get_results()");
      break;
   }
   drm_intel_bo_unmap(query->bo);

   /* Now that we've processed the data stored in the query's buffer object,
    * we can release it.
    */
   drm_intel_bo_unreference(query->bo);
   query->bo = NULL;
}

/**
 * The NewQueryObject() driver hook.
 *
 * Allocates and initializes a new query object.
 */
static struct gl_query_object *
brw_new_query_object(struct gl_context *ctx, GLuint id)
{
   struct brw_query_object *query;

   query = calloc(1, sizeof(struct brw_query_object));

   query->Base.Id = id;
   query->Base.Result = 0;
   query->Base.Active = false;
   query->Base.Ready = true;

   return &query->Base;
}

/**
 * The DeleteQuery() driver hook.
 */
static void
brw_delete_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_query_object *query = (struct brw_query_object *)q;

   drm_intel_bo_unreference(query->bo);
   free(query);
}

/**
 * Driver hook for glBeginQuery().
 *
 * Initializes driver structures and emits any GPU commands required to begin
 * recording data for the query.
 */
static void
brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED_EXT:
      /* For timestamp queries, we record the starting time right away so that
       * we measure the full time between BeginQuery and EndQuery. There's
       * some debate about whether this is the right thing to do. Our decision
       * is based on the following text from the ARB_timer_query extension:
       *
       * "(5) Should the extension measure total time elapsed between the full
       *  completion of the BeginQuery and EndQuery commands, or just time
       *  spent in the graphics library?
       *
       *  RESOLVED: This extension will measure the total time elapsed
       *  between the full completion of these commands. Future extensions
       *  may implement a query to determine time elapsed at different stages
       *  of the graphics pipeline."
       *
       * We write a starting timestamp now (at index 0). At EndQuery() time,
       * we'll write a second timestamp (at index 1), and subtract the two to
       * obtain the time elapsed. Notably, this includes time elapsed while
       * the system was doing other work, such as running other applications.
       */
      drm_intel_bo_unreference(query->bo);
      query->bo = drm_intel_bo_alloc(intel->bufmgr, "timer query", 4096, 4096);
      write_timestamp(intel, query->bo, 0);
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
   case GL_SAMPLES_PASSED_ARB:
      /* For occlusion queries, we delay taking an initial sample until the
       * first drawing occurs in this batch. See the reasoning in the comments
       * for brw_emit_query_begin() below.
       *
       * Since we're starting a new query, we need to be sure to throw away
       * any previous occlusion query results.
       */
      drm_intel_bo_unreference(query->bo);
      query->bo = NULL;
      query->last_index = -1;

      brw->query.obj = query;

      /* Depth statistics on Gen4 require strange workarounds, so we try to
       * avoid them when possible. They're required for occlusion queries,
       * so turn them on now.
       */
      intel->stats_wm++;
      break;

   case GL_PRIMITIVES_GENERATED:
      /* We don't actually query the hardware for this value; we keep track of
       * it in a software counter. So just reset the counter.
       */
      brw->sol.primitives_generated = 0;
      brw->sol.counting_primitives_generated = true;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      /* We don't actually query the hardware for this value; we keep track of
       * it in a software counter. So just reset the counter.
       */
      brw->sol.primitives_written = 0;
      brw->sol.counting_primitives_written = true;
      break;

   default:
      assert(!"Unrecognized query target in brw_begin_query()");
      break;
   }
}

/**
 * Driver hook for glEndQuery().
 *
 * Emits GPU commands to record a final query value, ending any data capturing.
 * However, the final result isn't necessarily available until the GPU processes
 * those commands. brw_queryobj_get_results() processes the captured data to
 * produce the final result.
 */
static void
brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED_EXT:
      /* Write the final timestamp. */
      write_timestamp(intel, query->bo, 1);
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
   case GL_SAMPLES_PASSED_ARB:
      /* No query->bo means that EndQuery was called after BeginQuery with no
       * intervening drawing. Rather than doing nothing at all in this case,
       * we emit the query_begin and query_end state to the hardware anyway.
       * This guarantees that waiting on the result of this empty query also
       * causes all previous queries to complete, as required by the
       * specification:
       *
       *    It must always be true that if any query object
       *    returns a result available of TRUE, all queries of the
       *    same type issued prior to that query must also return
       *    TRUE. [OpenGL 4.3 (Core Profile) Section 4.2.1]
       */
      if (!query->bo) {
         brw_emit_query_begin(brw);
      }

      assert(query->bo);

      brw_emit_query_end(brw);

      drm_intel_bo_unreference(brw->query.bo);
      brw->query.bo = NULL;

      brw->query.obj = NULL;

      intel->stats_wm--;
      break;

   case GL_PRIMITIVES_GENERATED:
      /* We don't actually query the hardware for this value; we keep track of
       * it in a software counter. So just read the counter and store it in
       * the query object.
       */
      query->Base.Result = brw->sol.primitives_generated;
      brw->sol.counting_primitives_generated = false;

      /* And set query->bo to NULL so that this query won't try to wait
       * for any rendering to complete.
       */
      query->bo = NULL;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      /* We don't actually query the hardware for this value; we keep track of
       * it in a software counter. So just read the counter and store it in
       * the query object.
       */
      query->Base.Result = brw->sol.primitives_written;
      brw->sol.counting_primitives_written = false;

      /* And set query->bo to NULL so that this query won't try to wait
       * for any rendering to complete.
       */
      query->bo = NULL;
      break;

   default:
      assert(!"Unrecognized query target in brw_end_query()");
      break;
   }
}

/**
 * The WaitQuery() driver hook.
 *
 * Wait for a query result to become available and return it. This is the
 * backing for glGetQueryObjectiv() with the GL_QUERY_RESULT pname.
 */
static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_query_object *query = (struct brw_query_object *)q;

   brw_queryobj_get_results(ctx, query);
   query->Base.Ready = true;
}

/**
 * The CheckQuery() driver hook.
 *
 * Checks whether a query result is ready yet. If not, flushes.
 * This is the backing for glGetQueryObjectiv()'s GL_QUERY_RESULT_AVAILABLE
 * pname.
 */
static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   /* From the GL_ARB_occlusion_query spec:
    *
    * "Instead of allowing for an infinite loop, performing a
    *  QUERY_RESULT_AVAILABLE_ARB will perform a flush if the result is
    *  not ready yet on the first time it is queried. This ensures that
    *  the async query will return true in finite time."
    */
   if (query->bo && drm_intel_bo_references(intel->batch.bo, query->bo))
      intel_batchbuffer_flush(intel);

   if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
      brw_queryobj_get_results(ctx, query);
      query->Base.Ready = true;
   }
}

/**
 * Record the PS_DEPTH_COUNT value (for occlusion queries) just before
 * primitive drawing.
 *
 * In a pre-hardware context world, the single PS_DEPTH_COUNT register is
 * shared among all applications using the GPU. However, our query value
 * needs to only include fragments generated by our application/GL context.
 *
 * To accommodate this, we record PS_DEPTH_COUNT at the start and end of
 * each batchbuffer (technically, the first primitive drawn and flush time).
 * Subtracting each pair of values calculates the change in PS_DEPTH_COUNT
 * caused by a batchbuffer. Since there is no preemption inside batches,
 * this is guaranteed to only measure the effects of our current application.
 *
 * Adding each of these differences (in case drawing is done over many batches)
 * produces the final expected value.
 *
 * In a world with hardware contexts, PS_DEPTH_COUNT is saved and restored
 * as part of the context state, so this is unnecessary. We could simply
 * read two values and subtract them. However, it's safe to continue using
 * the old approach.
 */
void
brw_emit_query_begin(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct brw_query_object *query = brw->query.obj;

   /* Skip if we're not doing any queries, or we've already recorded the
    * initial query value for this batchbuffer.
    */
   if (!query || brw->query.begin_emitted)
      return;

   /* Check whether the buffer has enough space to store a new pair of
    * values. If not, allocate a new one of the same size; we'll gather the
    * existing buffer's results momentarily.
    */
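   /* A 4096-byte BO holds 4096 / sizeof(uint64_t) = 512 slots, or 256
    * begin/end pairs; the check asks whether the new pair's end slot, at
    * index last_index * 2 + 1, would land past the final slot.
    */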
   if (brw->query.bo == NULL ||
       query->last_index * 2 + 1 >= 4096 / sizeof(uint64_t)) {

      if (query->bo != NULL) {
         /* The old query BO did not have enough space, so we're about to
          * allocate a new one. Gather the results so far (adding up the
          * differences) and release the old BO.
          */
         brw_queryobj_get_results(ctx, query);
      }
      drm_intel_bo_unreference(brw->query.bo);
      brw->query.bo = NULL;

      brw->query.bo = drm_intel_bo_alloc(intel->bufmgr, "query", 4096, 1);
      drm_intel_bo_reference(brw->query.bo);
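      /* The extra reference belongs to query->bo, which is assigned below
       * and shares ownership of this BO with brw->query.bo.
       */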

      /* Fill the buffer with zeroes. This is probably superfluous. */
      drm_intel_bo_map(brw->query.bo, true);
      memset((char *)brw->query.bo->virtual, 0, 4096);
      drm_intel_bo_unmap(brw->query.bo);

      query->last_index = 0;
      query->bo = brw->query.bo;
   }

   write_depth_count(intel, brw->query.bo, query->last_index * 2);

   brw->query.begin_emitted = true;
}

/**
 * Called at batchbuffer flush to get an ending PS_DEPTH_COUNT.
 *
 * See the explanation in brw_emit_query_begin().
 */
void
brw_emit_query_end(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_query_object *query = brw->query.obj;

   if (!brw->query.begin_emitted)
      return;

   write_depth_count(intel, brw->query.bo, query->last_index * 2 + 1);

   brw->query.begin_emitted = false;
   query->last_index++;
}

/**
 * Driver hook for glQueryCounter().
 *
 * This handles GL_TIMESTAMP queries, which perform a pipelined read of the
 * current GPU time. This is unlike GL_TIME_ELAPSED, which measures the
 * time while the query is active.
 */
static void
brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   assert(q->Target == GL_TIMESTAMP);

   drm_intel_bo_unreference(query->bo);
   query->bo = drm_intel_bo_alloc(intel->bufmgr, "timestamp query", 4096, 4096);
   write_timestamp(intel, query->bo, 0);
}

/**
 * Read the TIMESTAMP register immediately (in a non-pipelined fashion).
 *
 * This is used to implement the GetTimestamp() driver hook.
 */
static uint64_t
brw_get_timestamp(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);
   uint64_t result = 0;

   drm_intel_reg_read(intel->bufmgr, TIMESTAMP, &result);

   /* See logic in brw_queryobj_get_results() */
   result = result >> 32;
   result *= 80;
   result &= (1ull << 36) - 1;

   return result;
}

void brw_init_queryobj_functions(struct dd_function_table *functions)
{
   functions->NewQueryObject = brw_new_query_object;
   functions->DeleteQuery = brw_delete_query;
   functions->BeginQuery = brw_begin_query;
   functions->EndQuery = brw_end_query;
   functions->QueryCounter = brw_query_counter;
   functions->CheckQuery = brw_check_query;
   functions->WaitQuery = brw_wait_query;
   functions->GetTimestamp = brw_get_timestamp;
}
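
/* For reference, a sketch of the typical application-side GL calls that
 * exercise the hooks registered above (application code, not part of the
 * driver):
 *
 *    GLuint q;
 *    GLuint64 samples;
 *
 *    glGenQueries(1, &q);                   // allocates via NewQueryObject
 *    glBeginQuery(GL_SAMPLES_PASSED, q);    // BeginQuery
 *    ...draw...
 *    glEndQuery(GL_SAMPLES_PASSED);         // EndQuery
 *    glGetQueryObjectui64v(q, GL_QUERY_RESULT, &samples);   // WaitQuery
 *    glDeleteQueries(1, &q);                // DeleteQuery
 *
 * Polling GL_QUERY_RESULT_AVAILABLE instead goes through CheckQuery, and
 * glQueryCounter(q, GL_TIMESTAMP) goes through QueryCounter.
 */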