nouveau: use bool instead of boolean
src/gallium/drivers/nouveau/nv50/nv50_query.c
/*
 * Copyright 2011 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christoph Bumiller
 */

#define NV50_PUSH_EXPLICIT_SPACE_CHECKING

#include "nv50/nv50_context.h"
#include "nv_object.xml.h"

#define NV50_QUERY_STATE_READY   0
#define NV50_QUERY_STATE_ACTIVE  1
#define NV50_QUERY_STATE_ENDED   2
#define NV50_QUERY_STATE_FLUSHED 3

/* XXX: Nested queries, and simultaneous queries on multiple gallium contexts
 * (since we use only a single GPU channel per screen) will not work properly.
 *
 * The first is not that big of an issue because OpenGL does not allow nested
 * queries anyway.
 */

struct nv50_query {
   uint32_t *data;
   uint16_t type;
   uint16_t index;
   uint32_t sequence;
   struct nouveau_bo *bo;
   uint32_t base;
   uint32_t offset; /* base + i * 32 */
   uint8_t state;
   bool is64bit;
   int nesting; /* only used for occlusion queries */
   struct nouveau_mm_allocation *mm;
   struct nouveau_fence *fence;
};

#define NV50_QUERY_ALLOC_SPACE 256

static INLINE struct nv50_query *
nv50_query(struct pipe_query *pipe)
{
   return (struct nv50_query *)pipe;
}

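/* (Re-)allocate storage for query results. Any previous buffer is released
 * first; if the old results may still be written by in-flight commands, the
 * nouveau_mm free is deferred until the current fence signals. Called with
 * size == 0 this just frees the storage. Returns false on allocation or
 * mapping failure. */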
static bool
nv50_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size)
{
   struct nv50_screen *screen = nv50->screen;
   int ret;

   if (q->bo) {
      nouveau_bo_ref(NULL, &q->bo);
      if (q->mm) {
         if (q->state == NV50_QUERY_STATE_READY)
            nouveau_mm_free(q->mm);
         else
            nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work,
                               q->mm);
      }
   }
   if (size) {
      q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
      if (!q->bo)
         return false;
      q->offset = q->base;

      ret = nouveau_bo_map(q->bo, 0, screen->base.client);
      if (ret) {
         nv50_query_allocate(nv50, q, 0);
         return false;
      }
      q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base);
   }
   return true;
}

static void
nv50_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
{
   nv50_query_allocate(nv50_context(pipe), nv50_query(pq), 0);
   nouveau_fence_ref(NULL, &nv50_query(pq)->fence);
   FREE(nv50_query(pq));
}

static struct pipe_query *
nv50_query_create(struct pipe_context *pipe, unsigned type, unsigned index)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nv50_query *q;

   q = CALLOC_STRUCT(nv50_query);
   if (!q)
      return NULL;

   if (!nv50_query_allocate(nv50, q, NV50_QUERY_ALLOC_SPACE)) {
      FREE(q);
      return NULL;
   }

   q->is64bit = (type == PIPE_QUERY_PRIMITIVES_GENERATED ||
                 type == PIPE_QUERY_PRIMITIVES_EMITTED ||
                 type == PIPE_QUERY_SO_STATISTICS ||
                 type == PIPE_QUERY_PIPELINE_STATISTICS);
   q->type = type;

   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) {
      q->offset -= 32;
      q->data -= 32 / sizeof(*q->data); /* we advance before query_begin! */
   }

   return (struct pipe_query *)q;
}

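/* Ask the GPU to write a query result record at q->offset + offset in the
 * query buffer, tagged with q->sequence so completion can be detected. The
 * 'get' word selects which counter to report and how; its bit encoding is
 * hardware-specific (see the magic values at the call sites below). */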
static void
nv50_query_get(struct nouveau_pushbuf *push, struct nv50_query *q,
               unsigned offset, uint32_t get)
{
   offset += q->offset;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_WR);
   BEGIN_NV04(push, NV50_3D(QUERY_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, q->bo->offset + offset);
   PUSH_DATA (push, q->bo->offset + offset);
   PUSH_DATA (push, q->sequence);
   PUSH_DATA (push, get);
}

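/* Counter queries are implemented by sampling the counters twice: begin_query
 * records a snapshot at the higher offsets of the allocation, end_query at
 * the base, and get_query_result reports the difference of the two. */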
static boolean
nv50_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   /* For occlusion queries we have to change the storage, because a previous
    * query might set the initial render condition to false even *after* we
    * re-initialized it to true.
    */
   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) {
      q->offset += 32;
      q->data += 32 / sizeof(*q->data);
      if (q->offset - q->base == NV50_QUERY_ALLOC_SPACE)
         nv50_query_allocate(nv50, q, NV50_QUERY_ALLOC_SPACE);

      /* XXX: can we do this with the GPU, and sync with respect to a previous
       * query?
       */
      q->data[0] = q->sequence; /* initialize sequence */
      q->data[1] = 1; /* initial render condition = true */
      q->data[4] = q->sequence + 1; /* for comparison COND_MODE */
      q->data[5] = 0;
   }
   if (!q->is64bit)
      q->data[0] = q->sequence++; /* the previously used one */

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      q->nesting = nv50->screen->num_occlusion_queries_active++;
      if (q->nesting) {
         nv50_query_get(push, q, 0x10, 0x0100f002);
      } else {
         PUSH_SPACE(push, 4);
         BEGIN_NV04(push, NV50_3D(COUNTER_RESET), 1);
         PUSH_DATA (push, NV50_3D_COUNTER_RESET_SAMPLECNT);
         BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
         PUSH_DATA (push, 1);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_query_get(push, q, 0x10, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_query_get(push, q, 0x20, 0x05805002);
      nv50_query_get(push, q, 0x30, 0x06805002);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      nv50_query_get(push, q, 0x80, 0x00801002); /* VFETCH, VERTICES */
      nv50_query_get(push, q, 0x90, 0x01801002); /* VFETCH, PRIMS */
      nv50_query_get(push, q, 0xa0, 0x02802002); /* VP, LAUNCHES */
      nv50_query_get(push, q, 0xb0, 0x03806002); /* GP, LAUNCHES */
      nv50_query_get(push, q, 0xc0, 0x04806002); /* GP, PRIMS_OUT */
      nv50_query_get(push, q, 0xd0, 0x07804002); /* RAST, PRIMS_IN */
      nv50_query_get(push, q, 0xe0, 0x08804002); /* RAST, PRIMS_OUT */
      nv50_query_get(push, q, 0xf0, 0x0980a002); /* ROP, PIXELS */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0x10, 0x00005002);
      break;
   default:
      break;
   }
   q->state = NV50_QUERY_STATE_ACTIVE;
   return true;
}

static void
nv50_query_end(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   q->state = NV50_QUERY_STATE_ENDED;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      nv50_query_get(push, q, 0, 0x0100f002);
      if (--nv50->screen->num_occlusion_queries_active == 0) {
         PUSH_SPACE(push, 2);
         BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
         PUSH_DATA (push, 0);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_query_get(push, q, 0, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_query_get(push, q, 0, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_query_get(push, q, 0x00, 0x05805002);
      nv50_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      nv50_query_get(push, q, 0x00, 0x00801002); /* VFETCH, VERTICES */
      nv50_query_get(push, q, 0x10, 0x01801002); /* VFETCH, PRIMS */
      nv50_query_get(push, q, 0x20, 0x02802002); /* VP, LAUNCHES */
      nv50_query_get(push, q, 0x30, 0x03806002); /* GP, LAUNCHES */
      nv50_query_get(push, q, 0x40, 0x04806002); /* GP, PRIMS_OUT */
      nv50_query_get(push, q, 0x50, 0x07804002); /* RAST, PRIMS_IN */
      nv50_query_get(push, q, 0x60, 0x08804002); /* RAST, PRIMS_OUT */
      nv50_query_get(push, q, 0x70, 0x0980a002); /* ROP, PIXELS */
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->sequence++;
      /* fall through */
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0, 0x00005002);
      break;
   case PIPE_QUERY_GPU_FINISHED:
      q->sequence++;
      nv50_query_get(push, q, 0, 0x1000f010);
      break;
   case NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET:
      nv50_query_get(push, q, 0, 0x0d005002 | (q->index << 5));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* This query is not issued to the GPU because disjoint is forced to
       * false. */
      q->state = NV50_QUERY_STATE_READY;
      break;
   default:
      assert(0);
      break;
   }

   if (q->is64bit)
      nouveau_fence_ref(nv50->screen->base.fence.current, &q->fence);
}

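/* Check whether the query has completed: 64-bit queries are ready once the
 * fence referenced at end_query has signalled, 32-bit queries once the GPU
 * has written back the expected sequence number. */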
static INLINE void
nv50_query_update(struct nv50_query *q)
{
   if (q->is64bit) {
      if (nouveau_fence_signalled(q->fence))
         q->state = NV50_QUERY_STATE_READY;
   } else {
      if (q->data[0] == q->sequence)
         q->state = NV50_QUERY_STATE_READY;
   }
}

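/* Read back a query result. If the result is not ready and wait is false,
 * kick the pushbuf once (so the query can actually complete) and return
 * false; with wait, block until the query buffer is idle before reading. */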
static boolean
nv50_query_result(struct pipe_context *pipe, struct pipe_query *pq,
                  boolean wait, union pipe_query_result *result)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nv50_query *q = nv50_query(pq);
   uint64_t *res64 = (uint64_t *)result;
   uint32_t *res32 = (uint32_t *)result;
   uint8_t *res8 = (uint8_t *)result;
   uint64_t *data64 = (uint64_t *)q->data;
   int i;

   if (q->state != NV50_QUERY_STATE_READY)
      nv50_query_update(q);

   if (q->state != NV50_QUERY_STATE_READY) {
      if (!wait) {
         /* for broken apps that spin on GL_QUERY_RESULT_AVAILABLE */
         if (q->state != NV50_QUERY_STATE_FLUSHED) {
            q->state = NV50_QUERY_STATE_FLUSHED;
            PUSH_KICK(nv50->base.pushbuf);
         }
         return false;
      }
      if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nv50->screen->base.client))
         return false;
   }
   q->state = NV50_QUERY_STATE_READY;

   switch (q->type) {
   case PIPE_QUERY_GPU_FINISHED:
      res8[0] = true;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER: /* u32 sequence, u32 count, u64 time */
      res64[0] = q->data[1] - q->data[5];
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED: /* u64 count, u64 time */
   case PIPE_QUERY_PRIMITIVES_EMITTED: /* u64 count, u64 time */
      res64[0] = data64[0] - data64[2];
      break;
   case PIPE_QUERY_SO_STATISTICS:
      res64[0] = data64[0] - data64[4];
      res64[1] = data64[2] - data64[6];
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      for (i = 0; i < 8; ++i)
         res64[i] = data64[i * 2] - data64[16 + i * 2];
      break;
   case PIPE_QUERY_TIMESTAMP:
      res64[0] = data64[1];
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      res64[0] = 1000000000;
      res8[8] = false;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      res64[0] = data64[1] - data64[3];
      break;
   case NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET:
      res32[0] = q->data[1];
      break;
   default:
      return false;
   }

   return true;
}

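/* Stall command processing (a FIFO semaphore acquire, not a CPU wait) until
 * the query's sequence number has been written back by the engine. */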
void
nv84_query_fifo_wait(struct nouveau_pushbuf *push, struct pipe_query *pq)
{
   struct nv50_query *q = nv50_query(pq);
   unsigned offset = q->offset;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NV04(push, SUBC_3D(NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, q->bo->offset + offset);
   PUSH_DATA (push, q->bo->offset + offset);
   PUSH_DATA (push, q->sequence);
   PUSH_DATA (push, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
}

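/* Translate a gallium render condition into the hardware COND_MODE and point
 * both the 3D and 2D engines at the query's result record. Without a query,
 * conditional rendering is simply disabled (COND_MODE_ALWAYS). */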
static void
nv50_render_condition(struct pipe_context *pipe,
                      struct pipe_query *pq,
                      boolean condition, uint mode)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q;
   uint32_t cond;
   bool wait =
      mode != PIPE_RENDER_COND_NO_WAIT &&
      mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   if (!pq) {
      cond = NV50_3D_COND_MODE_ALWAYS;
   }
   else {
      q = nv50_query(pq);
      /* NOTE: comparison of 2 queries only works if both have completed */
      switch (q->type) {
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
         cond = condition ? NV50_3D_COND_MODE_EQUAL :
                            NV50_3D_COND_MODE_NOT_EQUAL;
         wait = true;
         break;
      case PIPE_QUERY_OCCLUSION_COUNTER:
      case PIPE_QUERY_OCCLUSION_PREDICATE:
         if (likely(!condition)) {
            if (unlikely(q->nesting))
               cond = wait ? NV50_3D_COND_MODE_NOT_EQUAL :
                             NV50_3D_COND_MODE_ALWAYS;
            else
               cond = NV50_3D_COND_MODE_RES_NON_ZERO;
         } else {
            cond = wait ? NV50_3D_COND_MODE_EQUAL : NV50_3D_COND_MODE_ALWAYS;
         }
         break;
      default:
         assert(!"render condition query not a predicate");
         cond = NV50_3D_COND_MODE_ALWAYS;
         break;
      }
   }

   nv50->cond_query = pq;
   nv50->cond_cond = condition;
   nv50->cond_condmode = cond;
   nv50->cond_mode = mode;

   if (!pq) {
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, NV50_3D(COND_MODE), 1);
      PUSH_DATA (push, cond);
      return;
   }

   PUSH_SPACE(push, 9);

   if (wait) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NV04(push, NV50_3D(COND_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
   PUSH_DATA (push, cond);

   BEGIN_NV04(push, NV50_2D(COND_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
}

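/* Feed one dword of the query result to the GPU as pushbuf data, letting the
 * pushbuf pull it straight from the query buffer without a CPU readback. */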
void
nv50_query_pushbuf_submit(struct nouveau_pushbuf *push,
                          struct pipe_query *pq, unsigned result_offset)
{
   struct nv50_query *q = nv50_query(pq);

   /* XXX: does this exist? */
#define NV50_IB_ENTRY_1_NO_PREFETCH (0 << (31 - 8))

   PUSH_REFN(push, q->bo, NOUVEAU_BO_RD | NOUVEAU_BO_GART);
   nouveau_pushbuf_space(push, 0, 0, 1);
   nouveau_pushbuf_data(push, q->bo, q->offset + result_offset, 4 |
                        NV50_IB_ENTRY_1_NO_PREFETCH);
}

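/* Record the current offset of a stream output target by ending its internal
 * query; with serialize set, a GRAPH_SERIALIZE is emitted first so the
 * recorded value is ordered against preceding work. */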
void
nva0_so_target_save_offset(struct pipe_context *pipe,
                           struct pipe_stream_output_target *ptarg,
                           unsigned index, bool serialize)
{
   struct nv50_so_target *targ = nv50_so_target(ptarg);

   if (serialize) {
      struct nouveau_pushbuf *push = nv50_context(pipe)->base.pushbuf;
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   nv50_query(targ->pq)->index = index;
   nv50_query_end(pipe, targ->pq);
}

void
nv50_init_query_functions(struct nv50_context *nv50)
{
   struct pipe_context *pipe = &nv50->base.pipe;

   pipe->create_query = nv50_query_create;
   pipe->destroy_query = nv50_query_destroy;
   pipe->begin_query = nv50_query_begin;
   pipe->end_query = nv50_query_end;
   pipe->get_query_result = nv50_query_result;
   pipe->render_condition = nv50_render_condition;
}