/*
 * Copyright 2011 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christoph Bumiller
 */

#define NV50_PUSH_EXPLICIT_SPACE_CHECKING

#include "nv50/nv50_context.h"
#include "nv_object.xml.h"

#define NV50_QUERY_STATE_READY 0
#define NV50_QUERY_STATE_ACTIVE 1
#define NV50_QUERY_STATE_ENDED 2
#define NV50_QUERY_STATE_FLUSHED 3
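
/* Lifecycle, as implemented below: begin_query() moves a query to ACTIVE,
 * end_query() to ENDED, and get_query_result() advances it to READY once the
 * result has landed. FLUSHED marks that we already kicked the pushbuf while
 * a non-blocking result poll was still pending.
 */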

/* XXX: Nested queries, and simultaneous queries on multiple gallium contexts
 * (since we use only a single GPU channel per screen) will not work properly.
 *
 * The first is not that big of an issue because OpenGL does not allow nested
 * queries anyway.
 */

struct nv50_query {
   uint32_t *data;
   uint16_t type;
   uint16_t index;
   uint32_t sequence;
   struct nouveau_bo *bo;
   uint32_t base;
   uint32_t offset; /* base + i * 32 */
   uint8_t state;
   boolean is64bit;
   struct nouveau_mm_allocation *mm;
   struct nouveau_fence *fence;
};
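
/* Judging from the offsets used below, each hardware report is 16 bytes: a
 * 64-bit counter value followed by a 64-bit timestamp. 32-bit queries use
 * the short layout noted in nv50_query_result() instead: u32 sequence,
 * u32 value, u64 timestamp. Occlusion queries rotate through 32-byte slots
 * (offset = base + i * 32); the multi-counter queries use more of the
 * 256-byte buffer for their begin/end snapshots.
 */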

#define NV50_QUERY_ALLOC_SPACE 256

static INLINE struct nv50_query *
nv50_query(struct pipe_query *pipe)
{
   return (struct nv50_query *)pipe;
}

static boolean
nv50_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size)
{
   struct nv50_screen *screen = nv50->screen;
   int ret;

   if (q->bo) {
      nouveau_bo_ref(NULL, &q->bo);
      if (q->mm) {
         if (q->state == NV50_QUERY_STATE_READY)
            nouveau_mm_free(q->mm);
         else
            /* The GPU may still write to the old slot; defer the free until
             * the screen's current fence has signalled.
             */
            nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work,
                               q->mm);
      }
   }
   if (size) {
      q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
      if (!q->bo)
         return FALSE;
      q->offset = q->base;

      ret = nouveau_bo_map(q->bo, 0, screen->base.client);
      if (ret) {
         nv50_query_allocate(nv50, q, 0);
         return FALSE;
      }
      q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base);
   }
   return TRUE;
}

static void
nv50_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
{
   nv50_query_allocate(nv50_context(pipe), nv50_query(pq), 0);
   nouveau_fence_ref(NULL, &nv50_query(pq)->fence);
   FREE(nv50_query(pq));
}

static struct pipe_query *
nv50_query_create(struct pipe_context *pipe, unsigned type, unsigned index)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nv50_query *q;

   q = CALLOC_STRUCT(nv50_query);
   if (!q)
      return NULL;

   if (!nv50_query_allocate(nv50, q, NV50_QUERY_ALLOC_SPACE)) {
      FREE(q);
      return NULL;
   }

   q->is64bit = (type == PIPE_QUERY_PRIMITIVES_GENERATED ||
                 type == PIPE_QUERY_PRIMITIVES_EMITTED ||
                 type == PIPE_QUERY_SO_STATISTICS ||
                 type == PIPE_QUERY_PIPELINE_STATISTICS);
   q->type = type;

   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) {
      q->offset -= 32;
      q->data -= 32 / sizeof(*q->data); /* we advance before query_begin ! */
   }

   return (struct pipe_query *)q;
}

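/* Emit a QUERY_GET: ask the GPU to write a report (the counter selected by
 * the control word in 'get', tagged with q->sequence) to q->bo at the given
 * offset. The control words used throughout this file are hardware-specific
 * magic values; their bitfield layout is not documented here.
 */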
static void
nv50_query_get(struct nouveau_pushbuf *push, struct nv50_query *q,
               unsigned offset, uint32_t get)
{
   offset += q->offset;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_WR);
   BEGIN_NV04(push, NV50_3D(QUERY_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, q->bo->offset + offset);
   PUSH_DATA (push, q->bo->offset + offset);
   PUSH_DATA (push, q->sequence);
   PUSH_DATA (push, get);
}

static boolean
nv50_query_begin(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   /* For occlusion queries we have to change the storage, because a previous
    * query might set the initial render condition to FALSE even *after* we re-
    * initialized it to TRUE.
    */
   if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) {
      q->offset += 32;
      q->data += 32 / sizeof(*q->data);
      if (q->offset - q->base == NV50_QUERY_ALLOC_SPACE)
         nv50_query_allocate(nv50, q, NV50_QUERY_ALLOC_SPACE);

      /* XXX: can we do this with the GPU, and sync with respect to a previous
       * query ?
       */
      q->data[0] = q->sequence; /* initialize sequence */
      q->data[1] = 1; /* initial render condition = TRUE */
      q->data[4] = q->sequence + 1; /* for comparison COND_MODE */
      q->data[5] = 0;
   }
   if (!q->is64bit)
      q->data[0] = q->sequence++; /* the previously used one */

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      PUSH_SPACE(push, 4);
      BEGIN_NV04(push, NV50_3D(COUNTER_RESET), 1);
      PUSH_DATA (push, NV50_3D_COUNTER_RESET_SAMPLECNT);
      BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
      PUSH_DATA (push, 1);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_query_get(push, q, 0x10, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_query_get(push, q, 0x20, 0x05805002);
      nv50_query_get(push, q, 0x30, 0x06805002);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      nv50_query_get(push, q, 0x80, 0x00801002); /* VFETCH, VERTICES */
      nv50_query_get(push, q, 0x90, 0x01801002); /* VFETCH, PRIMS */
      nv50_query_get(push, q, 0xa0, 0x02802002); /* VP, LAUNCHES */
      nv50_query_get(push, q, 0xb0, 0x03806002); /* GP, LAUNCHES */
      nv50_query_get(push, q, 0xc0, 0x04806002); /* GP, PRIMS_OUT */
      nv50_query_get(push, q, 0xd0, 0x07804002); /* RAST, PRIMS_IN */
      nv50_query_get(push, q, 0xe0, 0x08804002); /* RAST, PRIMS_OUT */
      nv50_query_get(push, q, 0xf0, 0x0980a002); /* ROP, PIXELS */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0x10, 0x00005002);
      break;
   default:
      break;
   }
   q->state = NV50_QUERY_STATE_ACTIVE;
   return TRUE;
}

static void
nv50_query_end(struct pipe_context *pipe, struct pipe_query *pq)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q = nv50_query(pq);

   q->state = NV50_QUERY_STATE_ENDED;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      nv50_query_get(push, q, 0, 0x0100f002);
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
      PUSH_DATA (push, 0);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      nv50_query_get(push, q, 0, 0x06805002);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      nv50_query_get(push, q, 0, 0x05805002);
      break;
   case PIPE_QUERY_SO_STATISTICS:
      nv50_query_get(push, q, 0x00, 0x05805002);
      nv50_query_get(push, q, 0x10, 0x06805002);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      nv50_query_get(push, q, 0x00, 0x00801002); /* VFETCH, VERTICES */
      nv50_query_get(push, q, 0x10, 0x01801002); /* VFETCH, PRIMS */
      nv50_query_get(push, q, 0x20, 0x02802002); /* VP, LAUNCHES */
      nv50_query_get(push, q, 0x30, 0x03806002); /* GP, LAUNCHES */
      nv50_query_get(push, q, 0x40, 0x04806002); /* GP, PRIMS_OUT */
      nv50_query_get(push, q, 0x50, 0x07804002); /* RAST, PRIMS_IN */
      nv50_query_get(push, q, 0x60, 0x08804002); /* RAST, PRIMS_OUT */
      nv50_query_get(push, q, 0x70, 0x0980a002); /* ROP, PIXELS */
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->sequence++;
      /* fall through */
   case PIPE_QUERY_TIME_ELAPSED:
      nv50_query_get(push, q, 0, 0x00005002);
      break;
   case PIPE_QUERY_GPU_FINISHED:
      q->sequence++;
      nv50_query_get(push, q, 0, 0x1000f010);
      break;
   case NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET:
      nv50_query_get(push, q, 0, 0x0d005002 | (q->index << 5));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      /* This query is not issued on the GPU because disjoint is forced to
       * FALSE.
       */
      q->state = NV50_QUERY_STATE_READY;
      break;
   default:
      assert(0);
      break;
   }

   if (q->is64bit)
      nouveau_fence_ref(nv50->screen->base.fence.current, &q->fence);
}

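/* 64-bit queries track completion with the fence taken at end_query(),
 * presumably because their reports carry no sequence word; 32-bit queries
 * compare the sequence the GPU wrote into data[0] instead.
 */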
static INLINE void
nv50_query_update(struct nv50_query *q)
{
   if (q->is64bit) {
      if (nouveau_fence_signalled(q->fence))
         q->state = NV50_QUERY_STATE_READY;
   } else {
      if (q->data[0] == q->sequence)
         q->state = NV50_QUERY_STATE_READY;
   }
}

static boolean
nv50_query_result(struct pipe_context *pipe, struct pipe_query *pq,
                  boolean wait, union pipe_query_result *result)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nv50_query *q = nv50_query(pq);
   uint64_t *res64 = (uint64_t *)result;
   uint32_t *res32 = (uint32_t *)result;
   boolean *res8 = (boolean *)result;
   uint64_t *data64 = (uint64_t *)q->data;
   int i;

   if (q->state != NV50_QUERY_STATE_READY)
      nv50_query_update(q);

   if (q->state != NV50_QUERY_STATE_READY) {
      if (!wait) {
         /* for broken apps that spin on GL_QUERY_RESULT_AVAILABLE */
         if (q->state != NV50_QUERY_STATE_FLUSHED) {
            q->state = NV50_QUERY_STATE_FLUSHED;
            PUSH_KICK(nv50->base.pushbuf);
         }
         return FALSE;
      }
      if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nv50->screen->base.client))
         return FALSE;
   }
   q->state = NV50_QUERY_STATE_READY;

   switch (q->type) {
   case PIPE_QUERY_GPU_FINISHED:
      res8[0] = TRUE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER: /* u32 sequence, u32 count, u64 time */
      res64[0] = q->data[1];
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED: /* u64 count, u64 time */
   case PIPE_QUERY_PRIMITIVES_EMITTED: /* u64 count, u64 time */
      res64[0] = data64[0] - data64[2];
      break;
   case PIPE_QUERY_SO_STATISTICS:
      res64[0] = data64[0] - data64[4];
      res64[1] = data64[2] - data64[6];
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      for (i = 0; i < 8; ++i)
         res64[i] = data64[i * 2] - data64[16 + i * 2];
      break;
   case PIPE_QUERY_TIMESTAMP:
      res64[0] = data64[1];
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      res64[0] = 1000000000;
      res8[8] = FALSE;
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      res64[0] = data64[1] - data64[3];
      break;
   case NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET:
      res32[0] = q->data[1];
      break;
   default:
      return FALSE;
   }

   return TRUE;
}

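/* Stall FIFO processing with a semaphore acquire on the query's own buffer
 * until the query's sequence value has been written, i.e. until the result
 * is visible to the GPU itself.
 */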
void
nv84_query_fifo_wait(struct nouveau_pushbuf *push, struct pipe_query *pq)
{
   struct nv50_query *q = nv50_query(pq);
   unsigned offset = q->offset;

   PUSH_SPACE(push, 5);
   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NV04(push, SUBC_3D(NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH), 4);
   PUSH_DATAh(push, q->bo->offset + offset);
   PUSH_DATA (push, q->bo->offset + offset);
   PUSH_DATA (push, q->sequence);
   PUSH_DATA (push, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
}

static void
nv50_render_condition(struct pipe_context *pipe,
                      struct pipe_query *pq,
                      boolean condition, uint mode)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_pushbuf *push = nv50->base.pushbuf;
   struct nv50_query *q;
   uint32_t cond;
   boolean wait =
      mode != PIPE_RENDER_COND_NO_WAIT &&
      mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   if (!pq) {
      cond = NV50_3D_COND_MODE_ALWAYS;
   }
   else {
      q = nv50_query(pq);
      /* NOTE: comparison of 2 queries only works if both have completed */
      switch (q->type) {
      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
         cond = condition ? NV50_3D_COND_MODE_EQUAL :
                            NV50_3D_COND_MODE_NOT_EQUAL;
         wait = TRUE;
         break;
      case PIPE_QUERY_OCCLUSION_COUNTER:
      case PIPE_QUERY_OCCLUSION_PREDICATE:
         if (likely(!condition)) {
            /* XXX: Placeholder, handle nesting here if available */
            if (unlikely(false))
               cond = wait ? NV50_3D_COND_MODE_NOT_EQUAL :
                             NV50_3D_COND_MODE_ALWAYS;
            else
               cond = NV50_3D_COND_MODE_RES_NON_ZERO;
         } else {
            cond = wait ? NV50_3D_COND_MODE_EQUAL : NV50_3D_COND_MODE_ALWAYS;
         }
         break;
      default:
         assert(!"render condition query not a predicate");
         cond = NV50_3D_COND_MODE_ALWAYS;
         break;
      }
   }

   nv50->cond_query = pq;
   nv50->cond_cond = condition;
   nv50->cond_condmode = cond;
   nv50->cond_mode = mode;

   if (!pq) {
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, NV50_3D(COND_MODE), 1);
      PUSH_DATA (push, cond);
      return;
   }

   PUSH_SPACE(push, 9);

   if (wait) {
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   PUSH_REFN (push, q->bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
   BEGIN_NV04(push, NV50_3D(COND_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
   PUSH_DATA (push, cond);

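   /* Point the 2D engine at the same condition word, so that 2D operations
    * (e.g. blits) honour the predicate as well.
    */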
   BEGIN_NV04(push, NV50_2D(COND_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, q->bo->offset + q->offset);
   PUSH_DATA (push, q->bo->offset + q->offset);
}

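/* Feed a dword of the query result directly into the command stream as an
 * indirect pushbuf entry, presumably for a draw path that sources a count
 * (e.g. a stream-output offset) the CPU never reads back.
 */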
void
nv50_query_pushbuf_submit(struct nouveau_pushbuf *push,
                          struct pipe_query *pq, unsigned result_offset)
{
   struct nv50_query *q = nv50_query(pq);

   /* XXX: does this exist ? Note that as written the macro evaluates to 0,
    * so it only documents the bit position it is meant to set.
    */
#define NV50_IB_ENTRY_1_NO_PREFETCH (0 << (31 - 8))

   PUSH_REFN(push, q->bo, NOUVEAU_BO_RD | NOUVEAU_BO_GART);
   nouveau_pushbuf_space(push, 0, 0, 1);
   nouveau_pushbuf_data(push, q->bo, q->offset + result_offset, 4 |
                        NV50_IB_ENTRY_1_NO_PREFETCH);
}

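/* Snapshot the current stream-output buffer offset into the target's query.
 * With 'serialize' set, a GRAPH_SERIALIZE is emitted first so the snapshot
 * is ordered after any rendering still in flight.
 */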
void
nva0_so_target_save_offset(struct pipe_context *pipe,
                           struct pipe_stream_output_target *ptarg,
                           unsigned index, boolean serialize)
{
   struct nv50_so_target *targ = nv50_so_target(ptarg);

   if (serialize) {
      struct nouveau_pushbuf *push = nv50_context(pipe)->base.pushbuf;
      PUSH_SPACE(push, 2);
      BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
      PUSH_DATA (push, 0);
   }

   nv50_query(targ->pq)->index = index;
   nv50_query_end(pipe, targ->pq);
}

void
nv50_init_query_functions(struct nv50_context *nv50)
{
   struct pipe_context *pipe = &nv50->base.pipe;

   pipe->create_query = nv50_query_create;
   pipe->destroy_query = nv50_query_destroy;
   pipe->begin_query = nv50_query_begin;
   pipe->end_query = nv50_query_end;
   pipe->get_query_result = nv50_query_result;
   pipe->render_condition = nv50_render_condition;
}