/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define NVC0_PUSH_EXPLICIT_SPACE_CHECKING

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "translate/translate.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_query_hw.h"
#include "nvc0/nvc0_resource.h"

#include "nvc0/nvc0_3d.xml.h"
static void
nvc0_vertex_state_delete(struct pipe_context *pipe,
                         void *hwcso)
{
   struct nvc0_vertex_stateobj *so = hwcso;

   if (so->translate)
      so->translate->release(so->translate);
   FREE(so);
}
static void *
nvc0_vertex_state_create(struct pipe_context *pipe,
                         unsigned num_elements,
                         const struct pipe_vertex_element *elements)
{
   struct nvc0_vertex_stateobj *so;
   struct translate_key transkey;
   unsigned i;
   unsigned src_offset_max = 0;

   so = MALLOC(sizeof(*so) +
               num_elements * sizeof(struct nvc0_vertex_element));
   if (!so)
      return NULL;
   so->num_elements = num_elements;
   so->instance_elts = 0;
   so->instance_bufs = 0;
   so->shared_slots = false;
   so->need_conversion = false;

   memset(so->vb_access_size, 0, sizeof(so->vb_access_size));

   for (i = 0; i < PIPE_MAX_ATTRIBS; ++i)
      so->min_instance_div[i] = 0xffffffff;

   transkey.nr_elements = 0;
   transkey.output_stride = 0;

   for (i = 0; i < num_elements; ++i) {
      const struct pipe_vertex_element *ve = &elements[i];
      const unsigned vbi = ve->vertex_buffer_index;
      unsigned size;
      enum pipe_format fmt = ve->src_format;

      so->element[i].pipe = elements[i];
      so->element[i].state = nvc0_vertex_format[fmt].vtx;

      if (!so->element[i].state) {
         switch (util_format_get_nr_components(fmt)) {
         case 1: fmt = PIPE_FORMAT_R32_FLOAT; break;
         case 2: fmt = PIPE_FORMAT_R32G32_FLOAT; break;
         case 3: fmt = PIPE_FORMAT_R32G32B32_FLOAT; break;
         case 4: fmt = PIPE_FORMAT_R32G32B32A32_FLOAT; break;
         default:
            assert(0);
            FREE(so);
            return NULL;
         }
         so->element[i].state = nvc0_vertex_format[fmt].vtx;
         so->need_conversion = true;
         pipe_debug_message(&nouveau_context(pipe)->debug, FALLBACK,
                            "Converting vertex element %d, no hw format %s",
                            i, util_format_name(ve->src_format));
      }
      size = util_format_get_blocksize(fmt);

      src_offset_max = MAX2(src_offset_max, ve->src_offset);

      if (so->vb_access_size[vbi] < (ve->src_offset + size))
         so->vb_access_size[vbi] = ve->src_offset + size;

      if (unlikely(ve->instance_divisor)) {
         so->instance_elts |= 1 << i;
         so->instance_bufs |= 1 << vbi;
         if (ve->instance_divisor < so->min_instance_div[vbi])
            so->min_instance_div[vbi] = ve->instance_divisor;
      }

      if (1) {
         unsigned ca;
         unsigned j = transkey.nr_elements++;

         ca = util_format_description(fmt)->channel[0].size / 8;
         if (ca != 1 && ca != 2)
            ca = 4;

         transkey.element[j].type = TRANSLATE_ELEMENT_NORMAL;
         transkey.element[j].input_format = ve->src_format;
         transkey.element[j].input_buffer = vbi;
         transkey.element[j].input_offset = ve->src_offset;
         transkey.element[j].instance_divisor = ve->instance_divisor;

         transkey.output_stride = align(transkey.output_stride, ca);
         transkey.element[j].output_format = fmt;
         transkey.element[j].output_offset = transkey.output_stride;
         transkey.output_stride += size;

         so->element[i].state_alt = so->element[i].state;
         so->element[i].state_alt |= transkey.element[j].output_offset << 7;
      }

      so->element[i].state |= i << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT;
   }
   transkey.output_stride = align(transkey.output_stride, 4);

   so->size = transkey.output_stride;
   so->translate = translate_create(&transkey);

   if (so->instance_elts || src_offset_max >= (1 << 14))
      return so;
   so->shared_slots = true;

   for (i = 0; i < num_elements; ++i) {
      const unsigned b = elements[i].vertex_buffer_index;
      const unsigned s = elements[i].src_offset;
      so->element[i].state &= ~NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__MASK;
      so->element[i].state |= b << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT;
      so->element[i].state |= s << NVC0_3D_VERTEX_ATTRIB_FORMAT_OFFSET__SHIFT;
   }
   return so;
}
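/* Note: the shared-slots layout above is only usable when no element is
 * instanced and every src_offset fits in the 14-bit OFFSET field of the
 * attrib format word; in that case the buffer index and byte offset are
 * baked into each VERTEX_ATTRIB_FORMAT entry, so a hardware vertex array
 * can be set up once per pipe vertex buffer instead of once per element
 * (inferred from the early return and the fixup loop above).
 */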
#define NVC0_3D_VERTEX_ATTRIB_INACTIVE                                       \
   NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |                                 \
   NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32 | NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST

#define VTX_ATTR(a, c, t, s)                            \
   ((NVC0_3D_VTX_ATTR_DEFINE_TYPE_##t) |                \
    (NVC0_3D_VTX_ATTR_DEFINE_SIZE_##s) |                \
    ((a) << NVC0_3D_VTX_ATTR_DEFINE_ATTR__SHIFT) |      \
    ((c) << NVC0_3D_VTX_ATTR_DEFINE_COMP__SHIFT))
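/* Illustrative example: VTX_ATTR(3, 4, FLOAT, 32) yields a VTX_ATTR_DEFINE
 * word selecting attribute 3 with 4 components of 32-bit floats; the
 * component values then follow as the next 4 data words (see
 * nvc0_set_constant_vertex_attrib() below).
 */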
static void
nvc0_set_constant_vertex_attrib(struct nvc0_context *nvc0, const unsigned a)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[a].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   uint32_t mode;
   const struct util_format_description *desc;
   void *dst;
   const void *src = (const uint8_t *)vb->buffer.user + ve->src_offset;
   assert(vb->is_user_buffer);

   desc = util_format_description(ve->src_format);

   PUSH_SPACE(push, 6);
   BEGIN_NVC0(push, NVC0_3D(VTX_ATTR_DEFINE), 5);
   dst = &push->cur[1];
   util_format_unpack_rgba(ve->src_format, dst, src, 1);
   if (desc->channel[0].pure_integer) {
      if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED) {
         mode = VTX_ATTR(a, 4, SINT, 32);
      } else {
         mode = VTX_ATTR(a, 4, UINT, 32);
      }
   } else {
      mode = VTX_ATTR(a, 4, FLOAT, 32);
   }
   push->cur[0] = mode;
   push->cur += 5;
}
static inline void
nvc0_user_vbuf_range(struct nvc0_context *nvc0, int vbi,
                     uint32_t *base, uint32_t *size)
{
   if (unlikely(nvc0->vertex->instance_bufs & (1 << vbi))) {
      const uint32_t div = nvc0->vertex->min_instance_div[vbi];
      *base = nvc0->instance_off * nvc0->vtxbuf[vbi].stride;
      *size = (nvc0->instance_max / div) * nvc0->vtxbuf[vbi].stride +
         nvc0->vertex->vb_access_size[vbi];
   } else {
      /* NOTE: if there are user buffers, we *must* have index bounds */
      assert(nvc0->vb_elt_limit != ~0);
      *base = nvc0->vb_elt_first * nvc0->vtxbuf[vbi].stride;
      *size = nvc0->vb_elt_limit * nvc0->vtxbuf[vbi].stride +
         nvc0->vertex->vb_access_size[vbi];
   }
}
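/* The [*base, *base + *size) range above bounds what the GPU may read from
 * the user buffer: for instanced buffers it follows from the instance range
 * divided by the smallest divisor used on that buffer, otherwise from the
 * index bounds, in both cases padded by the per-buffer access size recorded
 * at CSO creation time.
 */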
static void
nvc0_release_user_vbufs(struct nvc0_context *nvc0)
{
   if (nvc0->vbo_user) {
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   }
}
static void
nvc0_update_user_vbufs(struct nvc0_context *nvc0)
{
   uint64_t address[PIPE_MAX_ATTRIBS];
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned i;
   uint32_t written = 0;

   PUSH_SPACE(push, nvc0->vertex->num_elements * 8);
   for (i = 0; i < nvc0->vertex->num_elements; ++i) {
      struct pipe_vertex_element *ve = &nvc0->vertex->element[i].pipe;
      const unsigned b = ve->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[b];
      uint32_t base, size;

      if (!(nvc0->vbo_user & (1 << b)))
         continue;
      if (nvc0->constant_vbos & (1 << b)) {
         nvc0_set_constant_vertex_attrib(nvc0, i);
         continue;
      }
      nvc0_user_vbuf_range(nvc0, b, &base, &size);

      if (!(written & (1 << b))) {
         struct nouveau_bo *bo;
         const uint32_t bo_flags = NOUVEAU_BO_RD | NOUVEAU_BO_GART;

         written |= 1 << b;
         address[b] = nouveau_scratch_data(&nvc0->base, vb->buffer.user,
                                           base, size, &bo);
         if (bo)
            BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, bo_flags, bo);

         NOUVEAU_DRV_STAT(&nvc0->screen->base, user_buffer_upload_bytes, size);
      }

      BEGIN_1IC0(push, NVC0_3D(MACRO_VERTEX_ARRAY_SELECT), 5);
      PUSH_DATA (push, i);
      PUSH_DATAh(push, address[b] + base + size - 1);
      PUSH_DATA (push, address[b] + base + size - 1);
      PUSH_DATAh(push, address[b] + ve->src_offset);
      PUSH_DATA (push, address[b] + ve->src_offset);
   }
   nvc0->base.vbo_dirty = true;
}
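/* The MACRO_VERTEX_ARRAY_SELECT launch above appears to take the array
 * index followed by the 64-bit limit and 64-bit start address, i.e. it
 * points one vertex array at the scratch copy of the user buffer (an
 * assumption based on the operand order pushed here and in
 * nvc0_update_user_vbufs_shared() below).
 */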
static void
nvc0_update_user_vbufs_shared(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   uint32_t mask = nvc0->vbo_user & ~nvc0->constant_vbos;

   PUSH_SPACE(push, nvc0->num_vtxbufs * 8);
   while (mask) {
      struct nouveau_bo *bo;
      const uint32_t bo_flags = NOUVEAU_BO_RD | NOUVEAU_BO_GART;
      uint64_t address;
      uint32_t base, size;
      const int b = ffs(mask) - 1;

      mask &= ~(1 << b);

      nvc0_user_vbuf_range(nvc0, b, &base, &size);

      address = nouveau_scratch_data(&nvc0->base, nvc0->vtxbuf[b].buffer.user,
                                     base, size, &bo);
      if (bo)
         BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, bo_flags, bo);

      BEGIN_1IC0(push, NVC0_3D(MACRO_VERTEX_ARRAY_SELECT), 5);
      PUSH_DATA (push, b);
      PUSH_DATAh(push, address + base + size - 1);
      PUSH_DATA (push, address + base + size - 1);
      PUSH_DATAh(push, address);
      PUSH_DATA (push, address);

      NOUVEAU_DRV_STAT(&nvc0->screen->base, user_buffer_upload_bytes, size);
   }

   mask = nvc0->state.constant_elts;
   while (mask) {
      int i = ffs(mask) - 1;
      mask &= ~(1 << i);
      nvc0_set_constant_vertex_attrib(nvc0, i);
   }
}
static void
nvc0_validate_vertex_buffers(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   const struct nvc0_vertex_stateobj *vertex = nvc0->vertex;
   uint32_t refd = 0;
   unsigned i;

   PUSH_SPACE(push, vertex->num_elements * 8);
   for (i = 0; i < vertex->num_elements; ++i) {
      const struct nvc0_vertex_element *ve;
      const struct pipe_vertex_buffer *vb;
      struct nv04_resource *res;
      unsigned b;
      unsigned limit, offset;

      if (nvc0->state.constant_elts & (1 << i))
         continue;
      ve = &vertex->element[i];
      b = ve->pipe.vertex_buffer_index;
      vb = &nvc0->vtxbuf[b];

      if (nvc0->vbo_user & (1 << b)) {
         if (!(nvc0->constant_vbos & (1 << b))) {
            if (ve->pipe.instance_divisor) {
               BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_DIVISOR(i)), 1);
               PUSH_DATA (push, ve->pipe.instance_divisor);
            }
            BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 1);
            PUSH_DATA (push, (1 << 12) | vb->stride);
         }
         /* address/value set in nvc0_update_user_vbufs */
         continue;
      }

      res = nv04_resource(vb->buffer.resource);
      offset = ve->pipe.src_offset + vb->buffer_offset;
      limit = vb->buffer.resource->width0 - 1;

      if (unlikely(ve->pipe.instance_divisor)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 4);
         PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | vb->stride);
         PUSH_DATAh(push, res->address + offset);
         PUSH_DATA (push, res->address + offset);
         PUSH_DATA (push, ve->pipe.instance_divisor);
      } else {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 3);
         PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | vb->stride);
         PUSH_DATAh(push, res->address + offset);
         PUSH_DATA (push, res->address + offset);
      }
      BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(i)), 2);
      PUSH_DATAh(push, res->address + limit);
      PUSH_DATA (push, res->address + limit);

      if (!(refd & (1 << b))) {
         refd |= 1 << b;
         BCTX_REFN(nvc0->bufctx_3d, 3D_VTX, res, RD);
      }
   }
   if (nvc0->vbo_user)
      nvc0_update_user_vbufs(nvc0);
}
static void
nvc0_validate_vertex_buffers_shared(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned b;
   const uint32_t mask = nvc0->vbo_user;

   PUSH_SPACE(push, nvc0->num_vtxbufs * 8 + nvc0->vertex->num_elements);
   for (b = 0; b < nvc0->num_vtxbufs; ++b) {
      struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[b];
      struct nv04_resource *buf;
      uint32_t offset, limit;

      if (mask & (1 << b)) {
         if (!(nvc0->constant_vbos & (1 << b))) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(b)), 1);
            PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | vb->stride);
         }
         /* address/value set in nvc0_update_user_vbufs_shared */
         continue;
      } else if (!vb->buffer.resource) {
         /* there can be holes in the vertex buffer lists */
         IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(b)), 0);
         continue;
      }
      buf = nv04_resource(vb->buffer.resource);
      offset = vb->buffer_offset;
      limit = buf->base.width0 - 1;

      BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(b)), 3);
      PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | vb->stride);
      PUSH_DATAh(push, buf->address + offset);
      PUSH_DATA (push, buf->address + offset);
      BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(b)), 2);
      PUSH_DATAh(push, buf->address + limit);
      PUSH_DATA (push, buf->address + limit);

      BCTX_REFN(nvc0->bufctx_3d, 3D_VTX, buf, RD);
   }
   /* If there are more elements than buffers, we might not have unset
    * fetching on the later elements.
    */
   for (; b < nvc0->vertex->num_elements; ++b)
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(b)), 0);

   if (nvc0->vbo_user)
      nvc0_update_user_vbufs_shared(nvc0);
}
void
nvc0_vertex_arrays_validate(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_vertex_stateobj *vertex = nvc0->vertex;
   struct nvc0_vertex_element *ve;
   uint32_t const_vbos;
   unsigned i;
   uint8_t vbo_mode;
   bool update_vertex;

   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX);

   assert(vertex);
   if (unlikely(vertex->need_conversion) ||
       unlikely(nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS)) {
      vbo_mode = 3;
   } else if (nvc0->vbo_user & ~nvc0->constant_vbos) {
      vbo_mode = nvc0->vbo_push_hint ? 1 : 0;
   } else {
      vbo_mode = 0;
   }
   const_vbos = vbo_mode ? 0 : nvc0->constant_vbos;

   update_vertex = (nvc0->dirty_3d & NVC0_NEW_3D_VERTEX) ||
      (const_vbos != nvc0->state.constant_vbos) ||
      (vbo_mode != nvc0->state.vbo_mode);

   if (update_vertex) {
      const unsigned n = MAX2(vertex->num_elements, nvc0->state.num_vtxelts);

      nvc0->state.constant_vbos = const_vbos;
      nvc0->state.constant_elts = 0;
      nvc0->state.num_vtxelts = vertex->num_elements;
      nvc0->state.vbo_mode = vbo_mode;

      if (unlikely(vbo_mode)) {
         if (unlikely(nvc0->state.instance_elts & 3)) {
            /* translate mode uses only 2 vertex buffers */
            nvc0->state.instance_elts &= ~3;
            PUSH_SPACE(push, 3);
            BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(0)), 2);
            PUSH_DATA (push, 0);
            PUSH_DATA (push, 0);
         }

         PUSH_SPACE(push, n * 2 + 4);

         BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(0)), n);
         for (i = 0; i < vertex->num_elements; ++i)
            PUSH_DATA(push, vertex->element[i].state_alt);
         for (; i < n; ++i)
            PUSH_DATA(push, NVC0_3D_VERTEX_ATTRIB_INACTIVE);

         BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(0)), 1);
         PUSH_DATA (push, (1 << 12) | vertex->size);
         for (i = 1; i < n; ++i)
            IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 0);
      } else {
         uint32_t *restrict data;

         if (unlikely(vertex->instance_elts != nvc0->state.instance_elts)) {
            nvc0->state.instance_elts = vertex->instance_elts;
            assert(n); /* if (n == 0), both masks should be 0 */
            PUSH_SPACE(push, 3);
            BEGIN_NVC0(push, NVC0_3D(MACRO_VERTEX_ARRAY_PER_INSTANCE), 2);
            PUSH_DATA (push, n);
            PUSH_DATA (push, vertex->instance_elts);
         }

         PUSH_SPACE(push, n * 2 + 1);
         BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(0)), n);

         data = push->cur;
         push->cur += n;

         for (i = 0; i < vertex->num_elements; ++i) {
            ve = &vertex->element[i];
            data[i] = ve->state;
            if (unlikely(const_vbos & (1 << ve->pipe.vertex_buffer_index))) {
               nvc0->state.constant_elts |= 1 << i;
               data[i] |= NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST;
               IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 0);
            }
         }
         for (; i < n; ++i) {
            data[i] = NVC0_3D_VERTEX_ATTRIB_INACTIVE;
            IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 0);
         }
      }
   }
   if (nvc0->state.vbo_mode) /* using translate, don't set up arrays here */
      return;

   if (vertex->shared_slots)
      nvc0_validate_vertex_buffers_shared(nvc0);
   else
      nvc0_validate_vertex_buffers(nvc0);
}
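/* vbo_mode, as used above: 0 submits real vertex arrays, a non-zero value
 * routes draws through the translate/push path; 3 appears to mark the
 * forced-push case (format conversion needed or edge flags in use), which
 * the per-draw heuristic in nvc0_draw_vbo() does not override.
 */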
#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

static inline unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(PATCHES);
   default:
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}
static void
nvc0_draw_vbo_kick_notify(struct nouveau_pushbuf *push)
{
   struct nvc0_screen *screen = push->user_priv;

   nouveau_fence_update(&screen->base, true);

   NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1);
}
static void
nvc0_draw_arrays(struct nvc0_context *nvc0,
                 unsigned mode, unsigned start, unsigned count,
                 unsigned instance_count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned prim;

   if (nvc0->state.index_bias) {
      /* index_bias is implied 0 if !info->index_size (really ?) */
      /* TODO: can we deactivate it for the VERTEX_BUFFER_FIRST command ? */
      PUSH_SPACE(push, 2);
      IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0);
      IMMED_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   prim = nvc0_prim_gl(mode);

   while (instance_count--) {
      PUSH_SPACE(push, 6);
      BEGIN_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (push, prim);
      BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
      PUSH_DATA (push, start);
      PUSH_DATA (push, count);
      IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);

      prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
   }
   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_array, 1);
}
static void
nvc0_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 3) {
      unsigned i;

      PUSH_SPACE(push, 4);
      BEGIN_NIC0(push, NVC0_3D(VB_ELEMENT_U32), count & 3);
      for (i = 0; i < (count & 3); ++i)
         PUSH_DATA(push, *map++);

      count &= ~3;
   }
   while (count) {
      unsigned i, nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN * 4) / 4;

      PUSH_SPACE(push, nr + 1);
      BEGIN_NIC0(push, NVC0_3D(VB_ELEMENT_U8), nr);
      for (i = 0; i < nr; ++i) {
         PUSH_DATA(push,
                   (map[3] << 24) | (map[2] << 16) | (map[1] << 8) | map[0]);
         map += 4;
      }
      count -= nr * 4;
   }
}
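/* Packing sketch for the function above: the leading (count & 3) indices go
 * out one per word via VB_ELEMENT_U32, then the remaining multiple of four
 * is packed four 8-bit indices per 32-bit word through VB_ELEMENT_U8, in
 * the little-endian order assembled above.
 */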
static void
nvc0_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      count &= ~1;
      PUSH_SPACE(push, 2);
      BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }
   while (count) {
      unsigned i, nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN * 2) / 2;

      PUSH_SPACE(push, nr + 1);
      BEGIN_NIC0(push, NVC0_3D(VB_ELEMENT_U16), nr);
      for (i = 0; i < nr; ++i) {
         PUSH_DATA(push, (map[1] << 16) | map[0]);
         map += 2;
      }
      count -= nr * 2;
   }
}
static void
nvc0_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   while (count) {
      const unsigned nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);

      PUSH_SPACE(push, nr + 1);
      BEGIN_NIC0(push, NVC0_3D(VB_ELEMENT_U32), nr);
      PUSH_DATAp(push, map, nr);

      map += nr;
      count -= nr;
   }
}
static void
nvc0_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
                                    const uint32_t *map,
                                    unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      count--;
      PUSH_SPACE(push, 2);
      BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }
   while (count) {
      unsigned i, nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN * 2) / 2;

      PUSH_SPACE(push, nr + 1);
      BEGIN_NIC0(push, NVC0_3D(VB_ELEMENT_U16), nr);
      for (i = 0; i < nr; ++i) {
         PUSH_DATA(push, (map[1] << 16) | map[0]);
         map += 2;
      }
      count -= nr * 2;
   }
}
static void
nvc0_draw_elements(struct nvc0_context *nvc0, bool shorten,
                   const struct pipe_draw_info *info,
                   unsigned mode, unsigned start, unsigned count,
                   unsigned instance_count, int32_t index_bias,
                   unsigned index_size)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned prim;

   prim = nvc0_prim_gl(mode);

   if (index_bias != nvc0->state.index_bias) {
      PUSH_SPACE(push, 4);
      BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 1);
      PUSH_DATA (push, index_bias);
      BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 1);
      PUSH_DATA (push, index_bias);
      nvc0->state.index_bias = index_bias;
   }

   if (!info->has_user_indices) {
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), prim);
      do {
         PUSH_SPACE(push, 7);
         BEGIN_NVC0(push, NVC0_3D(INDEX_BATCH_FIRST), 2);
         PUSH_DATA (push, start);
         PUSH_DATA (push, count);
         if (--instance_count) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_END_GL), 2);
            PUSH_DATA (push, 0);
            PUSH_DATA (push, prim | NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT);
         }
      } while (instance_count);
      IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);
   } else {
      const void *data = info->index.user;

      while (instance_count--) {
         PUSH_SPACE(push, 2);
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), 1);
         PUSH_DATA (push, prim);
         switch (index_size) {
         case 1:
            nvc0_draw_elements_inline_u08(push, data, start, count);
            break;
         case 2:
            nvc0_draw_elements_inline_u16(push, data, start, count);
            break;
         case 4:
            if (shorten)
               nvc0_draw_elements_inline_u32_short(push, data, start, count);
            else
               nvc0_draw_elements_inline_u32(push, data, start, count);
            break;
         default:
            assert(0);
            return;
         }
         PUSH_SPACE(push, 1);
         IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);

         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
      }
   }
   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_indexed, 1);
}
static void
nvc0_draw_stream_output(struct nvc0_context *nvc0,
                        const struct pipe_draw_info *info)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_so_target *so = nvc0_so_target(info->count_from_stream_output);
   struct nv04_resource *res = nv04_resource(so->pipe.buffer);
   unsigned mode = nvc0_prim_gl(info->mode);
   unsigned num_instances = info->instance_count;

   if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      PUSH_SPACE(push, 2);
      IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
      nvc0_hw_query_fifo_wait(nvc0, nvc0_query(so->pq));
      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);

      NOUVEAU_DRV_STAT(&nvc0->screen->base, gpu_serialize_count, 1);
   }

   while (num_instances--) {
      nouveau_pushbuf_space(push, 16, 0, 1);
      BEGIN_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (push, mode);
      BEGIN_NVC0(push, NVC0_3D(DRAW_TFB_BASE), 1);
      PUSH_DATA (push, 0);
      BEGIN_NVC0(push, NVC0_3D(DRAW_TFB_STRIDE), 1);
      PUSH_DATA (push, so->stride);
      BEGIN_NVC0(push, NVC0_3D(DRAW_TFB_BYTES), 1);
      nvc0_hw_query_pushbuf_submit(push, nvc0_query(so->pq), 0x4);
      IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);

      mode |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
   }
}
static void
nvc0_draw_indirect(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nv04_resource *buf = nv04_resource(info->indirect->buffer);
   struct nv04_resource *buf_count = nv04_resource(info->indirect->indirect_draw_count);
   unsigned size, macro, count = info->indirect->draw_count, drawid = info->drawid;
   uint32_t offset = buf->offset + info->indirect->offset;
   struct nvc0_screen *screen = nvc0->screen;

   PUSH_SPACE(push, 7);

   /* must make FIFO wait for engines idle before continuing to process */
   if ((buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr)) ||
       (buf_count && buf_count->fence_wr &&
        !nouveau_fence_signalled(buf_count->fence_wr))) {
      IMMED_NVC0(push, SUBC_3D(NV10_SUBCHAN_REF_CNT), 0);
   }

   /* Queue things up to let the macros write params to the driver constbuf */
   BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
   PUSH_DATA (push, NVC0_CB_AUX_SIZE);
   PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
   PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
   BEGIN_NVC0(push, NVC0_3D(CB_POS), 1);
   PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO);

   if (info->index_size) {
      assert(!info->has_user_indices);
      assert(nouveau_resource_mapped_by_gpu(info->index.resource));
      size = 5;
      if (buf_count)
         macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT_COUNT;
      else
         macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT;
   } else {
      if (nvc0->state.index_bias) {
         /* index_bias is implied 0 if !info->index_size (really ?) */
         IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0);
         IMMED_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 0);
         nvc0->state.index_bias = 0;
      }
      size = 4;
      if (buf_count)
         macro = NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT_COUNT;
      else
         macro = NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT;
   }

   /* If the stride is not the natural stride, we have to stick a separate
    * push data reference for each draw. Otherwise it can all go in as one.
    * Of course there is a maximum packet size, so we have to break things up
    * along those borders as well.
    */
   while (count) {
      unsigned draws = count, pushes, i;

      if (info->indirect->stride == size * 4) {
         draws = MIN2(draws, (NV04_PFIFO_MAX_PACKET_LEN - 4) / size);
         pushes = 1;
      } else {
         draws = MIN2(draws, 32);
         pushes = draws;
      }

      nouveau_pushbuf_space(push, 16, 0, pushes + !!buf_count);
      PUSH_REFN(push, buf->bo, NOUVEAU_BO_RD | buf->domain);
      if (buf_count)
         PUSH_REFN(push, buf_count->bo, NOUVEAU_BO_RD | buf_count->domain);
      PUSH_DATA(push,
                NVC0_FIFO_PKHDR_1I(0, macro, 3 + !!buf_count + draws * size));
      PUSH_DATA(push, nvc0_prim_gl(info->mode));
      PUSH_DATA(push, drawid);
      PUSH_DATA(push, draws);
      if (buf_count) {
         nouveau_pushbuf_data(push,
                              buf_count->bo,
                              buf_count->offset + info->indirect->indirect_draw_count_offset,
                              NVC0_IB_ENTRY_1_NO_PREFETCH | 4);
      }
      if (pushes == 1) {
         nouveau_pushbuf_data(push,
                              buf->bo, offset,
                              NVC0_IB_ENTRY_1_NO_PREFETCH | (size * 4 * draws));
         offset += draws * info->indirect->stride;
      } else {
         for (i = 0; i < pushes; i++) {
            nouveau_pushbuf_data(push,
                                 buf->bo, offset,
                                 NVC0_IB_ENTRY_1_NO_PREFETCH | (size * 4));
            offset += info->indirect->stride;
         }
      }

      count -= draws;
      drawid += draws;
   }
}
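/* Chunking in the loop above: when the application stride equals the natural
 * record size (size * 4 bytes), all records of a batch go out as one
 * contiguous pushbuf data reference, bounded only by the packet length; with
 * any other stride each draw needs its own data reference, so batches are
 * capped at 32 draws.
 */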
static void
nvc0_update_prim_restart(struct nvc0_context *nvc0, bool en, uint32_t index)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;

   if (en != nvc0->state.prim_restart) {
      if (en) {
         BEGIN_NVC0(push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
         PUSH_DATA (push, 1);
         PUSH_DATA (push, index);
      } else {
         IMMED_NVC0(push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
      }
      nvc0->state.prim_restart = en;
   } else
   if (en) {
      BEGIN_NVC0(push, NVC0_3D(PRIM_RESTART_INDEX), 1);
      PUSH_DATA (push, index);
   }
}
void
nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_screen *screen = nvc0->screen;
   unsigned vram_domain = NV_VRAM_DOMAIN(&screen->base);
   int s;

   /* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
   nvc0->vb_elt_first = info->min_index + info->index_bias;
   nvc0->vb_elt_limit = info->max_index - info->min_index;
   nvc0->instance_off = info->start_instance;
   nvc0->instance_max = info->instance_count - 1;

   /* For picking only a few vertices from a large user buffer, push is better,
    * if index count is larger and we expect repeated vertices, suggest upload.
    */
   nvc0->vbo_push_hint =
      !info->indirect && info->index_size &&
      (nvc0->vb_elt_limit >= (info->count * 2));

   /* Check whether we want to switch vertex-submission mode. */
   if (nvc0->vbo_user && !(nvc0->dirty_3d & (NVC0_NEW_3D_ARRAYS | NVC0_NEW_3D_VERTEX))) {
      if (nvc0->vbo_push_hint != !!nvc0->state.vbo_mode)
         if (nvc0->state.vbo_mode != 3)
            nvc0->dirty_3d |= NVC0_NEW_3D_ARRAYS;

      if (!(nvc0->dirty_3d & NVC0_NEW_3D_ARRAYS) && nvc0->state.vbo_mode == 0) {
         if (nvc0->vertex->shared_slots)
            nvc0_update_user_vbufs_shared(nvc0);
         else
            nvc0_update_user_vbufs(nvc0);
      }
   }

   if (info->mode == PIPE_PRIM_PATCHES &&
       nvc0->state.patch_vertices != info->vertices_per_patch) {
      nvc0->state.patch_vertices = info->vertices_per_patch;
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(PATCH_VERTICES), nvc0->state.patch_vertices);
   }

   if (info->index_size && !info->has_user_indices) {
      struct nv04_resource *buf = nv04_resource(info->index.resource);

      assert(buf);
      assert(nouveau_resource_mapped_by_gpu(&buf->base));

      PUSH_SPACE(push, 6);
      BEGIN_NVC0(push, NVC0_3D(INDEX_ARRAY_START_HIGH), 5);
      PUSH_DATAh(push, buf->address);
      PUSH_DATA (push, buf->address);
      PUSH_DATAh(push, buf->address + buf->base.width0 - 1);
      PUSH_DATA (push, buf->address + buf->base.width0 - 1);
      PUSH_DATA (push, info->index_size >> 1);

      BCTX_REFN(nvc0->bufctx_3d, 3D_IDX, buf, RD);
   }

   list_for_each_entry(struct nvc0_resident, resident, &nvc0->tex_head, list) {
      nvc0_add_resident(nvc0->bufctx_3d, NVC0_BIND_3D_BINDLESS, resident->buf,
                        resident->flags);
   }

   list_for_each_entry(struct nvc0_resident, resident, &nvc0->img_head, list) {
      nvc0_add_resident(nvc0->bufctx_3d, NVC0_BIND_3D_BINDLESS, resident->buf,
                        resident->flags);
   }

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_TEXT, vram_domain | NOUVEAU_BO_RD,
                screen->text);

   nvc0_state_validate_3d(nvc0, ~0);

   if (nvc0->vertprog->vp.need_draw_parameters && !info->indirect) {
      PUSH_SPACE(push, 9);
      BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
      PUSH_DATA (push, NVC0_CB_AUX_SIZE);
      PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
      PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
      BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 3);
      PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO);
      PUSH_DATA (push, info->index_bias);
      PUSH_DATA (push, info->start_instance);
      PUSH_DATA (push, info->drawid);
   }

   if (nvc0->screen->base.class_3d < NVE4_3D_CLASS &&
       nvc0->seamless_cube_map != nvc0->state.seamless_cube_map) {
      nvc0->state.seamless_cube_map = nvc0->seamless_cube_map;
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(TEX_MISC),
                 nvc0->seamless_cube_map ? NVC0_3D_TEX_MISC_SEAMLESS_CUBE_MAP : 0);
   }

   push->kick_notify = nvc0_draw_vbo_kick_notify;

   for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
      if (nvc0->constbuf_coherent[s])
         nvc0->cb_dirty = true;
   }

   if (nvc0->cb_dirty) {
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(MEM_BARRIER), 0x1011);
      nvc0->cb_dirty = false;
   }

   for (s = 0; s < 5; ++s) {
      if (!nvc0->textures_coherent[s])
         continue;

      PUSH_SPACE(push, nvc0->num_textures[s] * 2);

      for (int i = 0; i < nvc0->num_textures[s]; ++i) {
         struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
         if (!(nvc0->textures_coherent[s] & (1 << i)))
            continue;

         BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
         PUSH_DATA (push, (tic->id << 4) | 1);
         NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_cache_flush_count, 1);
      }
   }

   if (nvc0->state.vbo_mode) {
      if (info->indirect)
         nvc0_push_vbo_indirect(nvc0, info);
      else
         nvc0_push_vbo(nvc0, info);
      goto cleanup;
   }

   /* space for base instance, flush, and prim restart */
   PUSH_SPACE(push, 8);

   if (nvc0->state.instance_base != info->start_instance) {
      nvc0->state.instance_base = info->start_instance;
      /* NOTE: this does not affect the shader input, should it ? */
      BEGIN_NVC0(push, NVC0_3D(VB_INSTANCE_BASE), 1);
      PUSH_DATA (push, info->start_instance);
   }

   nvc0->base.vbo_dirty |= !!nvc0->vtxbufs_coherent;

   if (!nvc0->base.vbo_dirty && info->index_size && !info->has_user_indices &&
       info->index.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
      nvc0->base.vbo_dirty = true;

   nvc0_update_prim_restart(nvc0, info->primitive_restart, info->restart_index);

   if (nvc0->base.vbo_dirty) {
      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      nvc0->base.vbo_dirty = false;
   }

   if (unlikely(info->indirect)) {
      nvc0_draw_indirect(nvc0, info);
   } else
   if (unlikely(info->count_from_stream_output)) {
      nvc0_draw_stream_output(nvc0, info);
   } else
   if (info->index_size) {
      bool shorten = info->max_index <= 65535;

      if (info->primitive_restart && info->restart_index > 65535)
         shorten = false;

      nvc0_draw_elements(nvc0, shorten, info,
                         info->mode, info->start, info->count,
                         info->instance_count, info->index_bias, info->index_size);
   } else {
      nvc0_draw_arrays(nvc0,
                       info->mode, info->start, info->count,
                       info->instance_count);
   }

cleanup:
   push->kick_notify = nvc0_default_kick_notify;

   nvc0_release_user_vbufs(nvc0);

   nouveau_pushbuf_bufctx(push, NULL);

   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_TEXT);
   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_IDX);
   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_BINDLESS);
}