/*
 * Copyright 2011 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors: Christoph Bumiller
 */
25 #define NVC0_PUSH_EXPLICIT_SPACE_CHECKING
27 #include "nvc0_context.h"
28 #include "nouveau/nv_object.xml.h"
35 struct nouveau_bo
*bo
;
37 uint32_t offset
; /* base + i * rotate */
42 int nesting
; /* only used for occlusion queries */
43 struct nouveau_mm_allocation
*mm
;
46 #define NVC0_QUERY_ALLOC_SPACE 256
48 static INLINE
struct nvc0_query
*
49 nvc0_query(struct pipe_query
*pipe
)
51 return (struct nvc0_query
*)pipe
;
55 nvc0_query_allocate(struct nvc0_context
*nvc0
, struct nvc0_query
*q
, int size
)
57 struct nvc0_screen
*screen
= nvc0
->screen
;
61 nouveau_bo_ref(NULL
, &q
->bo
);
64 nouveau_mm_free(q
->mm
);
66 nouveau_fence_work(screen
->base
.fence
.current
,
67 nouveau_mm_free_work
, q
->mm
);
71 q
->mm
= nouveau_mm_allocate(screen
->base
.mm_GART
, size
, &q
->bo
, &q
->base
);
76 ret
= nouveau_bo_map(q
->bo
, 0, screen
->base
.client
);
78 nvc0_query_allocate(nvc0
, q
, 0);
81 q
->data
= (uint32_t *)((uint8_t *)q
->bo
->map
+ q
->base
);
/* Release a query's buffer storage and the query object itself. */
static void
nvc0_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
{
   nvc0_query_allocate(nvc0_context(pipe), nvc0_query(pq), 0);
   /* FIX: the CALLOC_STRUCT'd query object was leaked — free it too. */
   FREE(nvc0_query(pq));
}
93 static struct pipe_query
*
94 nvc0_query_create(struct pipe_context
*pipe
, unsigned type
)
96 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
98 unsigned space
= NVC0_QUERY_ALLOC_SPACE
;
100 q
= CALLOC_STRUCT(nvc0_query
);
105 case PIPE_QUERY_OCCLUSION_COUNTER
:
106 case PIPE_QUERY_OCCLUSION_PREDICATE
:
108 space
= NVC0_QUERY_ALLOC_SPACE
;
110 case PIPE_QUERY_PIPELINE_STATISTICS
:
114 case PIPE_QUERY_SO_STATISTICS
:
115 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
119 case PIPE_QUERY_TIME_ELAPSED
:
120 case PIPE_QUERY_TIMESTAMP
:
121 case PIPE_QUERY_TIMESTAMP_DISJOINT
:
122 case PIPE_QUERY_GPU_FINISHED
:
123 case PIPE_QUERY_PRIMITIVES_GENERATED
:
124 case PIPE_QUERY_PRIMITIVES_EMITTED
:
127 case NVC0_QUERY_TFB_BUFFER_OFFSET
:
134 if (!nvc0_query_allocate(nvc0
, q
, space
)) {
142 /* we advance before query_begin ! */
143 q
->offset
-= q
->rotate
;
144 q
->data
-= q
->rotate
/ sizeof(*q
->data
);
147 q
->data
[0] = 0; /* initialize sequence */
149 return (struct pipe_query
*)q
;
153 nvc0_query_get(struct nouveau_pushbuf
*push
, struct nvc0_query
*q
,
154 unsigned offset
, uint32_t get
)
159 PUSH_REFN (push
, q
->bo
, NOUVEAU_BO_GART
| NOUVEAU_BO_WR
);
160 BEGIN_NVC0(push
, NVC0_3D(QUERY_ADDRESS_HIGH
), 4);
161 PUSH_DATAh(push
, q
->bo
->offset
+ offset
);
162 PUSH_DATA (push
, q
->bo
->offset
+ offset
);
163 PUSH_DATA (push
, q
->sequence
);
164 PUSH_DATA (push
, get
);
168 nvc0_query_rotate(struct nvc0_context
*nvc0
, struct nvc0_query
*q
)
170 q
->offset
+= q
->rotate
;
171 q
->data
+= q
->rotate
/ sizeof(*q
->data
);
172 if (q
->offset
- q
->base
== NVC0_QUERY_ALLOC_SPACE
)
173 nvc0_query_allocate(nvc0
, q
, NVC0_QUERY_ALLOC_SPACE
);
177 nvc0_query_begin(struct pipe_context
*pipe
, struct pipe_query
*pq
)
179 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
180 struct nouveau_pushbuf
*push
= nvc0
->base
.pushbuf
;
181 struct nvc0_query
*q
= nvc0_query(pq
);
183 /* For occlusion queries we have to change the storage, because a previous
184 * query might set the initial render conition to FALSE even *after* we re-
185 * initialized it to TRUE.
188 nvc0_query_rotate(nvc0
, q
);
190 /* XXX: can we do this with the GPU, and sync with respect to a previous
193 q
->data
[0] = q
->sequence
; /* initialize sequence */
194 q
->data
[1] = 1; /* initial render condition = TRUE */
195 q
->data
[4] = q
->sequence
+ 1; /* for comparison COND_MODE */
201 case PIPE_QUERY_OCCLUSION_COUNTER
:
202 case PIPE_QUERY_OCCLUSION_PREDICATE
:
203 q
->nesting
= nvc0
->screen
->num_occlusion_queries_active
++;
205 nvc0_query_get(push
, q
, 0x10, 0x0100f002);
208 BEGIN_NVC0(push
, NVC0_3D(COUNTER_RESET
), 1);
209 PUSH_DATA (push
, NVC0_3D_COUNTER_RESET_SAMPLECNT
);
210 IMMED_NVC0(push
, NVC0_3D(SAMPLECNT_ENABLE
), 1);
213 case PIPE_QUERY_PRIMITIVES_GENERATED
:
214 nvc0_query_get(push
, q
, 0x10, 0x06805002 | (q
->index
<< 5));
216 case PIPE_QUERY_PRIMITIVES_EMITTED
:
217 nvc0_query_get(push
, q
, 0x10, 0x05805002 | (q
->index
<< 5));
219 case PIPE_QUERY_SO_STATISTICS
:
220 nvc0_query_get(push
, q
, 0x20, 0x05805002 | (q
->index
<< 5));
221 nvc0_query_get(push
, q
, 0x30, 0x06805002 | (q
->index
<< 5));
223 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
224 nvc0_query_get(push
, q
, 0x10, 0x03005002 | (q
->index
<< 5));
226 case PIPE_QUERY_TIMESTAMP_DISJOINT
:
227 case PIPE_QUERY_TIME_ELAPSED
:
228 nvc0_query_get(push
, q
, 0x10, 0x00005002);
230 case PIPE_QUERY_PIPELINE_STATISTICS
:
231 nvc0_query_get(push
, q
, 0xc0 + 0x00, 0x00801002); /* VFETCH, VERTICES */
232 nvc0_query_get(push
, q
, 0xc0 + 0x10, 0x01801002); /* VFETCH, PRIMS */
233 nvc0_query_get(push
, q
, 0xc0 + 0x20, 0x02802002); /* VP, LAUNCHES */
234 nvc0_query_get(push
, q
, 0xc0 + 0x30, 0x03806002); /* GP, LAUNCHES */
235 nvc0_query_get(push
, q
, 0xc0 + 0x40, 0x04806002); /* GP, PRIMS_OUT */
236 nvc0_query_get(push
, q
, 0xc0 + 0x50, 0x07804002); /* RAST, PRIMS_IN */
237 nvc0_query_get(push
, q
, 0xc0 + 0x60, 0x08804002); /* RAST, PRIMS_OUT */
238 nvc0_query_get(push
, q
, 0xc0 + 0x70, 0x0980a002); /* ROP, PIXELS */
239 nvc0_query_get(push
, q
, 0xc0 + 0x80, 0x0d808002); /* TCP, LAUNCHES */
240 nvc0_query_get(push
, q
, 0xc0 + 0x90, 0x0e809002); /* TEP, LAUNCHES */
250 nvc0_query_end(struct pipe_context
*pipe
, struct pipe_query
*pq
)
252 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
253 struct nouveau_pushbuf
*push
= nvc0
->base
.pushbuf
;
254 struct nvc0_query
*q
= nvc0_query(pq
);
257 /* some queries don't require 'begin' to be called (e.g. GPU_FINISHED) */
259 nvc0_query_rotate(nvc0
, q
);
266 case PIPE_QUERY_OCCLUSION_COUNTER
:
267 case PIPE_QUERY_OCCLUSION_PREDICATE
:
268 nvc0_query_get(push
, q
, 0, 0x0100f002);
269 if (--nvc0
->screen
->num_occlusion_queries_active
== 0) {
271 IMMED_NVC0(push
, NVC0_3D(SAMPLECNT_ENABLE
), 0);
274 case PIPE_QUERY_PRIMITIVES_GENERATED
:
275 nvc0_query_get(push
, q
, 0, 0x06805002 | (q
->index
<< 5));
277 case PIPE_QUERY_PRIMITIVES_EMITTED
:
278 nvc0_query_get(push
, q
, 0, 0x05805002 | (q
->index
<< 5));
280 case PIPE_QUERY_SO_STATISTICS
:
281 nvc0_query_get(push
, q
, 0x00, 0x05805002 | (q
->index
<< 5));
282 nvc0_query_get(push
, q
, 0x10, 0x06805002 | (q
->index
<< 5));
284 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
285 /* TODO: How do we sum over all streams for render condition ? */
286 /* PRIMS_DROPPED doesn't write sequence, use a ZERO query to sync on */
287 nvc0_query_get(push
, q
, 0x00, 0x03005002 | (q
->index
<< 5));
288 nvc0_query_get(push
, q
, 0x20, 0x00005002);
290 case PIPE_QUERY_TIMESTAMP
:
291 case PIPE_QUERY_TIMESTAMP_DISJOINT
:
292 case PIPE_QUERY_TIME_ELAPSED
:
293 nvc0_query_get(push
, q
, 0, 0x00005002);
295 case PIPE_QUERY_GPU_FINISHED
:
296 nvc0_query_get(push
, q
, 0, 0x1000f010);
298 case PIPE_QUERY_PIPELINE_STATISTICS
:
299 nvc0_query_get(push
, q
, 0x00, 0x00801002); /* VFETCH, VERTICES */
300 nvc0_query_get(push
, q
, 0x10, 0x01801002); /* VFETCH, PRIMS */
301 nvc0_query_get(push
, q
, 0x20, 0x02802002); /* VP, LAUNCHES */
302 nvc0_query_get(push
, q
, 0x30, 0x03806002); /* GP, LAUNCHES */
303 nvc0_query_get(push
, q
, 0x40, 0x04806002); /* GP, PRIMS_OUT */
304 nvc0_query_get(push
, q
, 0x50, 0x07804002); /* RAST, PRIMS_IN */
305 nvc0_query_get(push
, q
, 0x60, 0x08804002); /* RAST, PRIMS_OUT */
306 nvc0_query_get(push
, q
, 0x70, 0x0980a002); /* ROP, PIXELS */
307 nvc0_query_get(push
, q
, 0x80, 0x0d808002); /* TCP, LAUNCHES */
308 nvc0_query_get(push
, q
, 0x90, 0x0e809002); /* TEP, LAUNCHES */
310 case NVC0_QUERY_TFB_BUFFER_OFFSET
:
311 /* indexed by TFB buffer instead of by vertex stream */
312 nvc0_query_get(push
, q
, 0x00, 0x0d005002 | (q
->index
<< 5));
320 static INLINE boolean
321 nvc0_query_ready(struct nouveau_client
*cli
, struct nvc0_query
*q
)
324 return !nouveau_bo_map(q
->bo
, NOUVEAU_BO_RD
| NOUVEAU_BO_NOBLOCK
, cli
);
326 return q
->data
[0] == q
->sequence
;
331 nvc0_query_result(struct pipe_context
*pipe
, struct pipe_query
*pq
,
332 boolean wait
, union pipe_query_result
*result
)
334 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
335 struct nvc0_query
*q
= nvc0_query(pq
);
336 uint64_t *res64
= (uint64_t*)result
;
337 uint32_t *res32
= (uint32_t*)result
;
338 boolean
*res8
= (boolean
*)result
;
339 uint64_t *data64
= (uint64_t *)q
->data
;
342 if (!q
->ready
) /* update ? */
343 q
->ready
= nvc0_query_ready(nvc0
->screen
->base
.client
, q
);
346 /* flush for silly apps that spin on GL_QUERY_RESULT_AVAILABLE */
347 if (nouveau_pushbuf_refd(nvc0
->base
.pushbuf
, q
->bo
) & NOUVEAU_BO_WR
)
348 PUSH_KICK(nvc0
->base
.pushbuf
);
351 if (nouveau_bo_wait(q
->bo
, NOUVEAU_BO_RD
, nvc0
->screen
->base
.client
))
357 case PIPE_QUERY_GPU_FINISHED
:
360 case PIPE_QUERY_OCCLUSION_COUNTER
: /* u32 sequence, u32 count, u64 time */
361 res64
[0] = q
->data
[1] - q
->data
[5];
363 case PIPE_QUERY_OCCLUSION_PREDICATE
:
364 res8
[0] = q
->data
[1] != q
->data
[5];
366 case PIPE_QUERY_PRIMITIVES_GENERATED
: /* u64 count, u64 time */
367 case PIPE_QUERY_PRIMITIVES_EMITTED
: /* u64 count, u64 time */
368 res64
[0] = data64
[0] - data64
[2];
370 case PIPE_QUERY_SO_STATISTICS
:
371 res64
[0] = data64
[0] - data64
[4];
372 res64
[1] = data64
[2] - data64
[6];
374 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
375 res8
[0] = data64
[0] != data64
[2];
377 case PIPE_QUERY_TIMESTAMP
:
378 res64
[0] = data64
[1];
380 case PIPE_QUERY_TIMESTAMP_DISJOINT
: /* u32 sequence, u32 0, u64 time */
381 res64
[0] = 1000000000;
382 res8
[8] = (data64
[1] == data64
[3]) ? FALSE
: TRUE
;
384 case PIPE_QUERY_TIME_ELAPSED
:
385 res64
[0] = data64
[1] - data64
[3];
387 case PIPE_QUERY_PIPELINE_STATISTICS
:
388 for (i
= 0; i
< 10; ++i
)
389 res64
[i
] = data64
[i
* 2] - data64
[24 + i
* 2];
391 case NVC0_QUERY_TFB_BUFFER_OFFSET
:
392 res32
[0] = q
->data
[1];
402 nvc0_query_fifo_wait(struct nouveau_pushbuf
*push
, struct pipe_query
*pq
)
404 struct nvc0_query
*q
= nvc0_query(pq
);
405 unsigned offset
= q
->offset
;
407 if (q
->type
== PIPE_QUERY_SO_OVERFLOW_PREDICATE
) offset
+= 0x20;
410 PUSH_REFN (push
, q
->bo
, NOUVEAU_BO_GART
| NOUVEAU_BO_RD
);
411 BEGIN_NVC0(push
, SUBC_3D(NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH
), 4);
412 PUSH_DATAh(push
, q
->bo
->offset
+ offset
);
413 PUSH_DATA (push
, q
->bo
->offset
+ offset
);
414 PUSH_DATA (push
, q
->sequence
);
415 PUSH_DATA (push
, (1 << 12) |
416 NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL
);
420 nvc0_render_condition(struct pipe_context
*pipe
,
421 struct pipe_query
*pq
, uint mode
)
423 struct nvc0_context
*nvc0
= nvc0_context(pipe
);
424 struct nouveau_pushbuf
*push
= nvc0
->base
.pushbuf
;
425 struct nvc0_query
*q
;
427 boolean negated
= FALSE
;
429 mode
!= PIPE_RENDER_COND_NO_WAIT
&&
430 mode
!= PIPE_RENDER_COND_BY_REGION_NO_WAIT
;
434 IMMED_NVC0(push
, NVC0_3D(COND_MODE
), NVC0_3D_COND_MODE_ALWAYS
);
439 /* NOTE: comparison of 2 queries only works if both have completed */
441 case PIPE_QUERY_SO_OVERFLOW_PREDICATE
:
442 cond
= negated
? NVC0_3D_COND_MODE_EQUAL
:
443 NVC0_3D_COND_MODE_NOT_EQUAL
;
446 case PIPE_QUERY_OCCLUSION_COUNTER
:
447 case PIPE_QUERY_OCCLUSION_PREDICATE
:
448 if (likely(!negated
)) {
449 if (unlikely(q
->nesting
))
450 cond
= wait
? NVC0_3D_COND_MODE_NOT_EQUAL
:
451 NVC0_3D_COND_MODE_ALWAYS
;
453 cond
= NVC0_3D_COND_MODE_RES_NON_ZERO
;
455 cond
= wait
? NVC0_3D_COND_MODE_EQUAL
: NVC0_3D_COND_MODE_ALWAYS
;
459 assert(!"render condition query not a predicate");
460 mode
= NVC0_3D_COND_MODE_ALWAYS
;
465 nvc0_query_fifo_wait(push
, pq
);
468 PUSH_REFN (push
, q
->bo
, NOUVEAU_BO_GART
| NOUVEAU_BO_RD
);
469 BEGIN_NVC0(push
, NVC0_3D(COND_ADDRESS_HIGH
), 3);
470 PUSH_DATAh(push
, q
->bo
->offset
+ q
->offset
);
471 PUSH_DATA (push
, q
->bo
->offset
+ q
->offset
);
472 PUSH_DATA (push
, cond
);
476 nvc0_query_pushbuf_submit(struct nouveau_pushbuf
*push
,
477 struct pipe_query
*pq
, unsigned result_offset
)
479 struct nvc0_query
*q
= nvc0_query(pq
);
481 #define NVC0_IB_ENTRY_1_NO_PREFETCH (1 << (31 - 8))
483 nouveau_pushbuf_space(push
, 0, 0, 1);
484 nouveau_pushbuf_data(push
, q
->bo
, q
->offset
+ result_offset
, 4 |
485 NVC0_IB_ENTRY_1_NO_PREFETCH
);
489 nvc0_so_target_save_offset(struct pipe_context
*pipe
,
490 struct pipe_stream_output_target
*ptarg
,
491 unsigned index
, boolean
*serialize
)
493 struct nvc0_so_target
*targ
= nvc0_so_target(ptarg
);
497 PUSH_SPACE(nvc0_context(pipe
)->base
.pushbuf
, 1);
498 IMMED_NVC0(nvc0_context(pipe
)->base
.pushbuf
, NVC0_3D(SERIALIZE
), 0);
501 nvc0_query(targ
->pq
)->index
= index
;
503 nvc0_query_end(pipe
, targ
->pq
);
507 nvc0_init_query_functions(struct nvc0_context
*nvc0
)
509 struct pipe_context
*pipe
= &nvc0
->base
.pipe
;
511 pipe
->create_query
= nvc0_query_create
;
512 pipe
->destroy_query
= nvc0_query_destroy
;
513 pipe
->begin_query
= nvc0_query_begin
;
514 pipe
->end_query
= nvc0_query_end
;
515 pipe
->get_query_result
= nvc0_query_result
;
516 pipe
->render_condition
= nvc0_render_condition
;