src/mesa/drivers/dri/i965/brw_queryobj.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/** @file support for ARB_query_object
 *
 * ARB_query_object is implemented by using the PIPE_CONTROL command to stall
 * execution on the completion of previous depth tests, and write the
 * current PS_DEPTH_COUNT to a buffer object.
 *
 * We use before and after counts when drawing during a query so that
 * we don't pick up other clients' query data in ours. To reduce overhead,
 * a single BO is used to record the query data for all active queries at
 * once. This also gives us a simple bound on how much batchbuffer space is
 * required for handling queries, so that we can be sure that we won't
 * have to emit a batchbuffer without getting the ending PS_DEPTH_COUNT.
 */
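
/*
 * Layout of the shared query BO, as written by brw_emit_query_begin() and
 * brw_emit_query_end() below: slot i holds the beginning PS_DEPTH_COUNT at
 * results[2 * i] and the ending count at results[2 * i + 1], and
 * brw_queryobj_get_results() sums the per-slot differences to produce the
 * GL_SAMPLES_PASSED result.
 */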
#include "main/imports.h"

#include "brw_context.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_reg.h"

/** Waits on the query object's BO and totals the results for this query */
static void
brw_queryobj_get_results(struct gl_context *ctx,
                         struct brw_query_object *query)
{
   struct intel_context *intel = intel_context(ctx);

   int i;
   uint64_t *results;

   if (query->bo == NULL)
      return;

   drm_intel_bo_map(query->bo, false);
   results = query->bo->virtual;
   switch (query->Base.Target) {
   case GL_TIME_ELAPSED_EXT:
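      /* Convert timestamp ticks to nanoseconds, since GL_TIME_ELAPSED
       * results are expressed in ns. Gen6+ timestamps tick roughly once
       * every 80 ns; the pre-gen6 path uses only the upper 32 bits of each
       * written value, scaled by 1000.
       */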
      if (intel->gen >= 6)
         query->Base.Result += 80 * (results[1] - results[0]);
      else
         query->Base.Result += 1000 * ((results[1] >> 32) - (results[0] >> 32));
      break;

   case GL_SAMPLES_PASSED_ARB:
      /* Map and count the pixels from the current query BO */
      for (i = query->first_index; i <= query->last_index; i++) {
         query->Base.Result += results[i * 2 + 1] - results[i * 2];
      }
      break;

   case GL_PRIMITIVES_GENERATED:
   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      /* We don't actually query the hardware for this value, so query->bo
       * should always be NULL and execution should never reach here.
       */
      assert(!"Unreachable");
      break;

   default:
      assert(!"Unrecognized query target in brw_queryobj_get_results()");
      break;
   }
   drm_intel_bo_unmap(query->bo);

   drm_intel_bo_unreference(query->bo);
   query->bo = NULL;
}

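/**
 * Driver hook (ctx->Driver.NewQueryObject): allocate the driver-private
 * brw_query_object that wraps core Mesa's gl_query_object.
 */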
static struct gl_query_object *
brw_new_query_object(struct gl_context *ctx, GLuint id)
{
   struct brw_query_object *query;

   query = calloc(1, sizeof(struct brw_query_object));
   if (query == NULL)
      return NULL;

   query->Base.Id = id;
   query->Base.Result = 0;
   query->Base.Active = false;
   query->Base.Ready = true;

   return &query->Base;
}

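/**
 * Driver hook (ctx->Driver.DeleteQuery): drop our reference on the query
 * BO, if any, and free the driver-private object.
 */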
static void
brw_delete_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_query_object *query = (struct brw_query_object *)q;

   drm_intel_bo_unreference(query->bo);
   free(query);
}

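/**
 * Driver hook for glBeginQuery().
 *
 * Sets up the per-target state needed to start collecting results: a fresh
 * BO and starting timestamp for GL_TIME_ELAPSED, reset bookkeeping for
 * occlusion queries, or a zeroed software counter for the transform
 * feedback targets.
 */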
static void
brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED_EXT:
      drm_intel_bo_unreference(query->bo);
      query->bo = drm_intel_bo_alloc(intel->bufmgr, "timer query",
                                     4096, 4096);

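      /* Emit a PIPE_CONTROL that writes the starting timestamp into the
       * query BO. On gen6+ the control flags go in the second dword of the
       * packet; on earlier gens they are OR'd into the command dword itself.
       */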
      if (intel->gen >= 6) {
         BEGIN_BATCH(4);
         OUT_BATCH(_3DSTATE_PIPE_CONTROL);
         OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP);
         OUT_RELOC(query->bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   PIPE_CONTROL_GLOBAL_GTT_WRITE |
                   0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      } else {
         BEGIN_BATCH(4);
         OUT_BATCH(_3DSTATE_PIPE_CONTROL |
                   PIPE_CONTROL_WRITE_TIMESTAMP);
         OUT_RELOC(query->bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   PIPE_CONTROL_GLOBAL_GTT_WRITE |
                   0);
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }
      break;

   case GL_SAMPLES_PASSED_ARB:
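      /* The PS_DEPTH_COUNT snapshots themselves are written by
       * brw_emit_query_begin() (just before drawing) and
       * brw_emit_query_end() (at batchbuffer flush) below.
       */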
      /* Reset our driver's tracking of query state. */
      drm_intel_bo_unreference(query->bo);
      query->bo = NULL;
      query->first_index = -1;
      query->last_index = -1;

      brw->query.obj = query;
      intel->stats_wm++;
      break;

   case GL_PRIMITIVES_GENERATED:
      /* We don't actually query the hardware for this value; we keep track
       * of it in a software counter. So just reset the counter.
       */
      brw->sol.primitives_generated = 0;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      /* We don't actually query the hardware for this value; we keep track
       * of it in a software counter. So just reset the counter.
       */
      brw->sol.primitives_written = 0;
      break;

   default:
      assert(!"Unrecognized query target in brw_begin_query()");
      break;
   }
}

/**
 * Driver hook for glEndQuery().
 *
 * Emits the commands (or reads the software counters) needed to finish
 * collecting this query's result.
 */
static void
brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED_EXT:
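      /* Write the ending timestamp into the second uint64_t of the query BO
       * (byte offset 8); brw_queryobj_get_results() later computes
       * results[1] - results[0].
       */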
      if (intel->gen >= 6) {
         BEGIN_BATCH(4);
         OUT_BATCH(_3DSTATE_PIPE_CONTROL);
         OUT_BATCH(PIPE_CONTROL_WRITE_TIMESTAMP);
         OUT_RELOC(query->bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   PIPE_CONTROL_GLOBAL_GTT_WRITE |
                   8);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      } else {
         BEGIN_BATCH(4);
         OUT_BATCH(_3DSTATE_PIPE_CONTROL |
                   PIPE_CONTROL_WRITE_TIMESTAMP);
         OUT_RELOC(query->bo,
                   I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                   PIPE_CONTROL_GLOBAL_GTT_WRITE |
                   8);
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }

      intel_batchbuffer_flush(intel);
      break;

   case GL_SAMPLES_PASSED_ARB:
      /* Flush the batchbuffer in case it has writes to our query BO.
       * Have later queries write to a new query BO so that further rendering
       * doesn't delay the collection of our results.
       */
      if (query->bo) {
         brw_emit_query_end(brw);
         intel_batchbuffer_flush(intel);

         drm_intel_bo_unreference(brw->query.bo);
         brw->query.bo = NULL;
      }

      brw->query.obj = NULL;

      intel->stats_wm--;
      break;

   case GL_PRIMITIVES_GENERATED:
      /* We don't actually query the hardware for this value; we keep track
       * of it in a software counter. So just read the counter and store it
       * in the query object.
       */
      query->Base.Result = brw->sol.primitives_generated;

      /* And set query->bo to NULL so that this query won't try to wait
       * for any rendering to complete.
       */
      query->bo = NULL;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      /* We don't actually query the hardware for this value; we keep track
       * of it in a software counter. So just read the counter and store it
       * in the query object.
       */
      query->Base.Result = brw->sol.primitives_written;

      /* And set query->bo to NULL so that this query won't try to wait
       * for any rendering to complete.
       */
      query->bo = NULL;
      break;

   default:
      assert(!"Unrecognized query target in brw_end_query()");
      break;
   }
}

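/**
 * Driver hook (ctx->Driver.WaitQuery): wait for the query's results to
 * become available (mapping the BO blocks on the GPU), total them, and
 * mark the query ready.
 */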
static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_query_object *query = (struct brw_query_object *)q;

   brw_queryobj_get_results(ctx, query);
   query->Base.Ready = true;
}

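/**
 * Driver hook (ctx->Driver.CheckQuery): non-blocking availability check.
 * Only gather results if the BO is idle (or was never needed), so that
 * polling for the result never stalls on the GPU.
 */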
static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_query_object *query = (struct brw_query_object *)q;

   if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
      brw_queryobj_get_results(ctx, query);
      query->Base.Ready = true;
   }
}

/** Called to set up the query BO and account for its aperture space */
void
brw_prepare_query_begin(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   /* Skip if we're not doing any queries. */
   if (!brw->query.obj)
      return;

   /* Get a new query BO if we're going to need it. */
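   /* Each slot in the BO holds a begin/end pair of uint64_t counters, so a
    * 4096-byte BO has room for 4096 / sizeof(uint64_t) / 2 = 256 slots;
    * allocate a fresh BO once the next slot wouldn't fit.
    */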
   if (brw->query.bo == NULL ||
       brw->query.index * 2 + 1 >= 4096 / sizeof(uint64_t)) {
      drm_intel_bo_unreference(brw->query.bo);
      brw->query.bo = NULL;

      brw->query.bo = drm_intel_bo_alloc(intel->bufmgr, "query", 4096, 1);

      /* clear target buffer */
      drm_intel_bo_map(brw->query.bo, true);
      memset((char *)brw->query.bo->virtual, 0, 4096);
      drm_intel_bo_unmap(brw->query.bo);

      brw->query.index = 0;
   }
}

/** Called just before primitive drawing to get a beginning PS_DEPTH_COUNT. */
void
brw_emit_query_begin(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct brw_query_object *query = brw->query.obj;

   /* Skip if we're not doing any queries, or we've emitted the start. */
   if (!query || brw->query.active)
      return;

   if (intel->gen >= 6) {
      BEGIN_BATCH(8);

      /* workaround: CS stall required before depth stall. */
      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_CS_STALL);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */

      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_DEPTH_STALL |
                PIPE_CONTROL_WRITE_DEPTH_COUNT);
      OUT_RELOC(brw->query.bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                ((brw->query.index * 2) * sizeof(uint64_t)));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL |
                PIPE_CONTROL_DEPTH_STALL |
                PIPE_CONTROL_WRITE_DEPTH_COUNT);
      /* This object could be mapped cacheable, but we don't have an exposed
       * mechanism to support that. Since it's going uncached, tell GEM that
       * we're writing to it. The usual clflush should be all that's required
       * to pick up the results.
       */
      OUT_RELOC(brw->query.bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                ((brw->query.index * 2) * sizeof(uint64_t)));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

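   /* If the shared query BO has changed since this query last recorded into
    * it (e.g. the old BO filled up and was replaced), accumulate the results
    * gathered so far and start recording into the new BO.
    */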
   if (query->bo != brw->query.bo) {
      if (query->bo != NULL)
         brw_queryobj_get_results(ctx, query);
      drm_intel_bo_reference(brw->query.bo);
      query->bo = brw->query.bo;
      query->first_index = brw->query.index;
   }
   query->last_index = brw->query.index;
   brw->query.active = true;
}

/** Called at batchbuffer flush to get an ending PS_DEPTH_COUNT */
void
brw_emit_query_end(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   if (!brw->query.active)
      return;

   if (intel->gen >= 6) {
      BEGIN_BATCH(8);
      /* workaround: CS stall required before depth stall. */
      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_CS_STALL);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */

      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_DEPTH_STALL |
                PIPE_CONTROL_WRITE_DEPTH_COUNT);
      OUT_RELOC(brw->query.bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                ((brw->query.index * 2 + 1) * sizeof(uint64_t)));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL |
                PIPE_CONTROL_DEPTH_STALL |
                PIPE_CONTROL_WRITE_DEPTH_COUNT);
      OUT_RELOC(brw->query.bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE |
                ((brw->query.index * 2 + 1) * sizeof(uint64_t)));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

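   /* This begin/end pair is complete; advance to the next slot so the next
    * brw_emit_query_begin() records into a fresh pair of counters.
    */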
   brw->query.active = false;
   brw->query.index++;
}

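/**
 * Hooks the query-object entry points into the device driver function
 * table, so that core Mesa's glBeginQuery()/glEndQuery()/glGetQueryObject*()
 * paths reach this file.
 */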
void brw_init_queryobj_functions(struct dd_function_table *functions)
{
   functions->NewQueryObject = brw_new_query_object;
   functions->DeleteQuery = brw_delete_query;
   functions->BeginQuery = brw_begin_query;
   functions->EndQuery = brw_end_query;
   functions->CheckQuery = brw_check_query;
   functions->WaitQuery = brw_wait_query;
}