i965: fix transform feedback with primitive restart
[mesa.git] / src / mesa / drivers / dri / i965 / brw_queryobj.c
1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 /** @file support for ARB_query_object
29 *
30 * ARB_query_object is implemented by using the PIPE_CONTROL command to stall
31 * execution on the completion of previous depth tests, and write the
32 * current PS_DEPTH_COUNT to a buffer object.
33 *
34 * We use before and after counts when drawing during a query so that
35 * we don't pick up other clients' query data in ours. To reduce overhead,
36 * a single BO is used to record the query data for all active queries at
37 * once. This also gives us a simple bound on how much batchbuffer space is
38 * required for handling queries, so that we can be sure that we won't
39 * have to emit a batchbuffer without getting the ending PS_DEPTH_COUNT.
40 */
41 #include "main/imports.h"
42
43 #include "brw_context.h"
44 #include "brw_state.h"
45 #include "intel_batchbuffer.h"
46 #include "intel_reg.h"
47
48 /** Waits on the query object's BO and totals the results for this query */
49 static void
50 brw_queryobj_get_results(struct gl_context *ctx,
51 struct brw_query_object *query)
52 {
53 struct intel_context *intel = intel_context(ctx);
54
55 int i;
56 uint64_t *results;
57
58 if (query->bo == NULL)
59 return;
60
61 drm_intel_bo_map(query->bo, false);
62 results = query->bo->virtual;
63 switch (query->Base.Target) {
64 case GL_TIME_ELAPSED_EXT:
65 if (intel->gen >= 6)
66 query->Base.Result += 80 * (results[1] - results[0]);
67 else
68 query->Base.Result += 1000 * ((results[1] >> 32) - (results[0] >> 32));
69 break;
70
71 case GL_SAMPLES_PASSED_ARB:
72 /* Map and count the pixels from the current query BO */
73 for (i = query->first_index; i <= query->last_index; i++) {
74 query->Base.Result += results[i * 2 + 1] - results[i * 2];
75 }
76 break;
77
78 case GL_PRIMITIVES_GENERATED:
79 case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
80 /* We don't actually query the hardware for this value, so query->bo
81 * should always be NULL and execution should never reach here.
82 */
83 assert(!"Unreachable");
84 break;
85
86 default:
87 assert(!"Unrecognized query target in brw_queryobj_get_results()");
88 break;
89 }
90 drm_intel_bo_unmap(query->bo);
91
92 drm_intel_bo_unreference(query->bo);
93 query->bo = NULL;
94 }
95
96 static struct gl_query_object *
97 brw_new_query_object(struct gl_context *ctx, GLuint id)
98 {
99 struct brw_query_object *query;
100
101 query = calloc(1, sizeof(struct brw_query_object));
102
103 query->Base.Id = id;
104 query->Base.Result = 0;
105 query->Base.Active = false;
106 query->Base.Ready = true;
107
108 return &query->Base;
109 }
110
111 static void
112 brw_delete_query(struct gl_context *ctx, struct gl_query_object *q)
113 {
114 struct brw_query_object *query = (struct brw_query_object *)q;
115
116 drm_intel_bo_unreference(query->bo);
117 free(query);
118 }
119
/** Driver hook for glBeginQuery().
 *
 * For GL_TIME_ELAPSED we allocate a fresh BO and emit a PIPE_CONTROL that
 * writes the starting timestamp to offset 0; brw_end_query() writes the
 * ending timestamp to offset 8.  The two gen branches differ only in
 * command encoding (gen6+ splits the flags into a separate dword).
 *
 * For GL_SAMPLES_PASSED the actual counter writes happen around each draw
 * in brw_emit_query_begin()/_end(); here we just reset our bookkeeping and
 * publish the query in brw->query.obj.  stats_wm forces statistics
 * gathering in the WM state.
 *
 * PRIMITIVES_GENERATED and TF_PRIMITIVES_WRITTEN are pure software
 * counters in brw->sol; we only zero them and flag that counting is on.
 */
static void
brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED_EXT:
      /* Replace any leftover BO from a previous use of this object. */
      drm_intel_bo_unreference(query->bo);
      query->bo = drm_intel_bo_alloc(intel->bufmgr, "timer query",
				     4096, 4096);

      if (intel->gen >= 6) {
	 /* gen6+ encoding: flags live in the second dword. */
	 BEGIN_BATCH(4);
	 OUT_BATCH(_3DSTATE_PIPE_CONTROL);
	 OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP);
	 /* Timestamp lands at offset 0 of the BO (start of the pair). */
	 OUT_RELOC(query->bo,
		   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
		   PIPE_CONTROL_GLOBAL_GTT_WRITE |
		   0);
	 OUT_BATCH(0);
	 ADVANCE_BATCH();

      } else {
	 /* pre-gen6 encoding: flags are OR'd into the opcode dword. */
	 BEGIN_BATCH(4);
	 OUT_BATCH(_3DSTATE_PIPE_CONTROL |
		   PIPE_CONTROL_WRITE_TIMESTAMP);
	 OUT_RELOC(query->bo,
		   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
		   PIPE_CONTROL_GLOBAL_GTT_WRITE |
		   0);
	 OUT_BATCH(0);
	 OUT_BATCH(0);
	 ADVANCE_BATCH();
      }
      break;

   case GL_SAMPLES_PASSED_ARB:
      /* Reset our driver's tracking of query state. */
      drm_intel_bo_unreference(query->bo);
      query->bo = NULL;
      /* -1 marks "no counters emitted yet"; brw_emit_query_begin()
       * fills in the real indices on the first draw.
       */
      query->first_index = -1;
      query->last_index = -1;

      brw->query.obj = query;
      intel->stats_wm++;
      break;

   case GL_PRIMITIVES_GENERATED:
      /* We don't actually query the hardware for this value; we keep track of
       * it a software counter.  So just reset the counter.
       */
      brw->sol.primitives_generated = 0;
      brw->sol.counting_primitives_generated = true;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      /* We don't actually query the hardware for this value; we keep track of
       * it a software counter.  So just reset the counter.
       */
      brw->sol.primitives_written = 0;
      brw->sol.counting_primitives_written = true;
      break;

   default:
      assert(!"Unrecognized query target in brw_begin_query()");
      break;
   }
}
190
/**
 * Driver hook for glEndQuery().
 *
 * (Note: the original comment said "Begin the ARB_occlusion_query query",
 * which described the wrong hook — this is the *end* of a query.)
 *
 * GL_TIME_ELAPSED: emit the ending-timestamp PIPE_CONTROL (written at
 * byte offset 8, pairing with the offset-0 write from brw_begin_query())
 * and flush so the result becomes available.
 *
 * GL_SAMPLES_PASSED: emit the final counter snapshot, flush, and retire
 * the shared query BO so later queries get a fresh one.
 *
 * The two software counters just copy the accumulated count into
 * query->Base.Result and stop counting.
 */
static void
brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED_EXT:
      if (intel->gen >= 6) {
	 BEGIN_BATCH(4);
	 OUT_BATCH(_3DSTATE_PIPE_CONTROL);
	 OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP);
	 /* Ending timestamp goes to offset 8 (results[1] in
	  * brw_queryobj_get_results()).
	  */
	 OUT_RELOC(query->bo,
		   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
		   PIPE_CONTROL_GLOBAL_GTT_WRITE |
		   8);
	 OUT_BATCH(0);
	 ADVANCE_BATCH();

      } else {
	 BEGIN_BATCH(4);
	 OUT_BATCH(_3DSTATE_PIPE_CONTROL |
		   PIPE_CONTROL_WRITE_TIMESTAMP);
	 OUT_RELOC(query->bo,
		   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
		   PIPE_CONTROL_GLOBAL_GTT_WRITE |
		   8);
	 OUT_BATCH(0);
	 OUT_BATCH(0);
	 ADVANCE_BATCH();
      }

      intel_batchbuffer_flush(intel);
      break;

   case GL_SAMPLES_PASSED_ARB:
      /* Flush the batchbuffer in case it has writes to our query BO.
       * Have later queries write to a new query BO so that further rendering
       * doesn't delay the collection of our results.
       */
      if (query->bo) {
	 brw_emit_query_end(brw);
	 intel_batchbuffer_flush(intel);

	 drm_intel_bo_unreference(brw->query.bo);
	 brw->query.bo = NULL;
      }

      brw->query.obj = NULL;

      intel->stats_wm--;
      break;

   case GL_PRIMITIVES_GENERATED:
      /* We don't actually query the hardware for this value; we keep track of
       * it in a software counter.  So just read the counter and store it in
       * the query object.
       */
      query->Base.Result = brw->sol.primitives_generated;
      brw->sol.counting_primitives_generated = false;

      /* And set query->bo to NULL so that this query won't try to wait
       * for any rendering to complete.
       */
      query->bo = NULL;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      /* We don't actually query the hardware for this value; we keep track of
       * it in a software counter.  So just read the counter and store it in
       * the query object.
       */
      query->Base.Result = brw->sol.primitives_written;
      brw->sol.counting_primitives_written = false;

      /* And set query->bo to NULL so that this query won't try to wait
       * for any rendering to complete.
       */
      query->bo = NULL;
      break;

   default:
      assert(!"Unrecognized query target in brw_end_query()");
      break;
   }
}
281
282 static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
283 {
284 struct brw_query_object *query = (struct brw_query_object *)q;
285
286 brw_queryobj_get_results(ctx, query);
287 query->Base.Ready = true;
288 }
289
290 static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
291 {
292 struct brw_query_object *query = (struct brw_query_object *)q;
293
294 if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
295 brw_queryobj_get_results(ctx, query);
296 query->Base.Ready = true;
297 }
298 }
299
300 /** Called to set up the query BO and account for its aperture space */
301 void
302 brw_prepare_query_begin(struct brw_context *brw)
303 {
304 struct intel_context *intel = &brw->intel;
305
306 /* Skip if we're not doing any queries. */
307 if (!brw->query.obj)
308 return;
309
310 /* Get a new query BO if we're going to need it. */
311 if (brw->query.bo == NULL ||
312 brw->query.index * 2 + 1 >= 4096 / sizeof(uint64_t)) {
313 drm_intel_bo_unreference(brw->query.bo);
314 brw->query.bo = NULL;
315
316 brw->query.bo = drm_intel_bo_alloc(intel->bufmgr, "query", 4096, 1);
317
318 /* clear target buffer */
319 drm_intel_bo_map(brw->query.bo, true);
320 memset((char *)brw->query.bo->virtual, 0, 4096);
321 drm_intel_bo_unmap(brw->query.bo);
322
323 brw->query.index = 0;
324 }
325 }
326
/** Called just before primitive drawing to get a beginning PS_DEPTH_COUNT.
 *
 * Emits a depth-stalled PIPE_CONTROL that writes PS_DEPTH_COUNT into
 * slot (index * 2) of the shared query BO, then records which slots this
 * query object covers.  Idempotent per batch: brw->query.active guards
 * against emitting a second begin before brw_emit_query_end() runs.
 */
void
brw_emit_query_begin(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct brw_query_object *query = brw->query.obj;

   /* Skip if we're not doing any queries, or we've emitted the start. */
   if (!query || brw->query.active)
      return;

   if (intel->gen >= 6) {
      BEGIN_BATCH(8);

      /* workaround: CS stall required before depth stall. */
      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_CS_STALL);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */

      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_DEPTH_STALL |
		PIPE_CONTROL_WRITE_DEPTH_COUNT);
      /* Begin count goes in the even slot of the pair for this index. */
      OUT_RELOC(brw->query.bo,
		I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
		PIPE_CONTROL_GLOBAL_GTT_WRITE |
		((brw->query.index * 2) * sizeof(uint64_t)));
      OUT_BATCH(0);
      ADVANCE_BATCH();

   } else {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL |
		PIPE_CONTROL_DEPTH_STALL |
		PIPE_CONTROL_WRITE_DEPTH_COUNT);
      /* This object could be mapped cacheable, but we don't have an exposed
       * mechanism to support that.  Since it's going uncached, tell GEM that
       * we're writing to it.  The usual clflush should be all that's required
       * to pick up the results.
       */
      OUT_RELOC(brw->query.bo,
		I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
		PIPE_CONTROL_GLOBAL_GTT_WRITE |
		((brw->query.index * 2) * sizeof(uint64_t)));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* If the query switched to a new shared BO, total up results from the
    * old one first, then start tracking indices in the new BO.
    */
   if (query->bo != brw->query.bo) {
      if (query->bo != NULL)
	 brw_queryobj_get_results(ctx, query);
      drm_intel_bo_reference(brw->query.bo);
      query->bo = brw->query.bo;
      query->first_index = brw->query.index;
   }
   query->last_index = brw->query.index;
   brw->query.active = true;
}
387
/** Called at batchbuffer flush to get an ending PS_DEPTH_COUNT.
 *
 * Mirror of brw_emit_query_begin(): writes PS_DEPTH_COUNT into the odd
 * slot (index * 2 + 1) of the shared BO, clears the active flag, and
 * advances to the next begin/end pair.  No-op if no begin was emitted.
 */
void
brw_emit_query_end(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   if (!brw->query.active)
      return;

   if (intel->gen >= 6) {
      BEGIN_BATCH(8);
      /* workaround: CS stall required before depth stall. */
      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_CS_STALL);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */

      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_DEPTH_STALL |
		PIPE_CONTROL_WRITE_DEPTH_COUNT);
      /* End count goes in the odd slot, pairing with the begin write. */
      OUT_RELOC(brw->query.bo,
		I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
		PIPE_CONTROL_GLOBAL_GTT_WRITE |
		((brw->query.index * 2 + 1) * sizeof(uint64_t)));
      OUT_BATCH(0);
      ADVANCE_BATCH();

   } else {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL |
		PIPE_CONTROL_DEPTH_STALL |
		PIPE_CONTROL_WRITE_DEPTH_COUNT);
      OUT_RELOC(brw->query.bo,
		I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
		PIPE_CONTROL_GLOBAL_GTT_WRITE |
		((brw->query.index * 2 + 1) * sizeof(uint64_t)));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   brw->query.active = false;
   brw->query.index++;
}
432
433 void brw_init_queryobj_functions(struct dd_function_table *functions)
434 {
435 functions->NewQueryObject = brw_new_query_object;
436 functions->DeleteQuery = brw_delete_query;
437 functions->BeginQuery = brw_begin_query;
438 functions->EndQuery = brw_end_query;
439 functions->CheckQuery = brw_check_query;
440 functions->WaitQuery = brw_wait_query;
441 }