/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define NVC0_PUSH_EXPLICIT_SPACE_CHECKING

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "translate/translate.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_query_hw.h"
#include "nvc0/nvc0_resource.h"

#include "nvc0/nvc0_3d.xml.h"
static void
nvc0_vertex_state_delete(struct pipe_context *pipe,
                         void *hwcso)
{
   struct nvc0_vertex_stateobj *so = hwcso;

   if (so->translate)
      so->translate->release(so->translate);
   FREE(hwcso);
}
static void *
nvc0_vertex_state_create(struct pipe_context *pipe,
                         unsigned num_elements,
                         const struct pipe_vertex_element *elements)
{
   struct nvc0_vertex_stateobj *so;
   struct translate_key transkey;
   unsigned i;
   unsigned src_offset_max = 0;

   so = MALLOC(sizeof(*so) +
               num_elements * sizeof(struct nvc0_vertex_element));
   if (!so)
      return NULL;
   so->num_elements = num_elements;
   so->instance_elts = 0;
   so->instance_bufs = 0;
   so->shared_slots = false;
   so->need_conversion = false;

   memset(so->vb_access_size, 0, sizeof(so->vb_access_size));

   for (i = 0; i < PIPE_MAX_ATTRIBS; ++i)
      so->min_instance_div[i] = 0xffffffff;

   transkey.nr_elements = 0;
   transkey.output_stride = 0;

   for (i = 0; i < num_elements; ++i) {
      const struct pipe_vertex_element *ve = &elements[i];
      const unsigned vbi = ve->vertex_buffer_index;
      unsigned size;
      enum pipe_format fmt = ve->src_format;

      so->element[i].pipe = elements[i];
      so->element[i].state = nvc0_vertex_format[fmt].vtx;

      if (!so->element[i].state) {
         switch (util_format_get_nr_components(fmt)) {
         case 1: fmt = PIPE_FORMAT_R32_FLOAT; break;
         case 2: fmt = PIPE_FORMAT_R32G32_FLOAT; break;
         case 3: fmt = PIPE_FORMAT_R32G32B32_FLOAT; break;
         case 4: fmt = PIPE_FORMAT_R32G32B32A32_FLOAT; break;
         default:
            assert(0);
            FREE(so);
            return NULL;
         }
         so->element[i].state = nvc0_vertex_format[fmt].vtx;
         so->need_conversion = true;
         pipe_debug_message(&nouveau_context(pipe)->debug, FALLBACK,
                            "Converting vertex element %d, no hw format %s",
                            i, util_format_name(ve->src_format));
      }
      size = util_format_get_blocksize(fmt);

      src_offset_max = MAX2(src_offset_max, ve->src_offset);

      if (so->vb_access_size[vbi] < (ve->src_offset + size))
         so->vb_access_size[vbi] = ve->src_offset + size;

      if (unlikely(ve->instance_divisor)) {
         so->instance_elts |= 1 << i;
         so->instance_bufs |= 1 << vbi;
         if (ve->instance_divisor < so->min_instance_div[vbi])
            so->min_instance_div[vbi] = ve->instance_divisor;
      }

      /* Also build the translate key for the push/conversion fallback path. */
      {
         unsigned ca;
         unsigned j = transkey.nr_elements++;

         ca = util_format_description(fmt)->channel[0].size / 8;
         if (ca != 1 && ca != 2)
            ca = 4;

         transkey.element[j].type = TRANSLATE_ELEMENT_NORMAL;
         transkey.element[j].input_format = ve->src_format;
         transkey.element[j].input_buffer = vbi;
         transkey.element[j].input_offset = ve->src_offset;
         transkey.element[j].instance_divisor = ve->instance_divisor;

         transkey.output_stride = align(transkey.output_stride, ca);
         transkey.element[j].output_format = fmt;
         transkey.element[j].output_offset = transkey.output_stride;
         transkey.output_stride += size;

         so->element[i].state_alt = so->element[i].state;
         so->element[i].state_alt |= transkey.element[j].output_offset << 7;
      }

      so->element[i].state |= i << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT;
   }
   transkey.output_stride = align(transkey.output_stride, 4);

   so->size = transkey.output_stride;
   so->translate = translate_create(&transkey);

   if (so->instance_elts || src_offset_max >= (1 << 14))
      return so;
   so->shared_slots = true;

   for (i = 0; i < num_elements; ++i) {
      const unsigned b = elements[i].vertex_buffer_index;
      const unsigned s = elements[i].src_offset;
      so->element[i].state &= ~NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__MASK;
      so->element[i].state |= b << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT;
      so->element[i].state |= s << NVC0_3D_VERTEX_ATTRIB_FORMAT_OFFSET__SHIFT;
   }
   return so;
}
#define NVC0_3D_VERTEX_ATTRIB_INACTIVE                                       \
   NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |                                 \
   NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32 | NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST

#define VTX_ATTR(a, c, t, s)                            \
   ((NVC0_3D_VTX_ATTR_DEFINE_TYPE_##t) |                \
    (NVC0_3D_VTX_ATTR_DEFINE_SIZE_##s) |                \
    ((a) << NVC0_3D_VTX_ATTR_DEFINE_ATTR__SHIFT) |      \
    ((c) << NVC0_3D_VTX_ATTR_DEFINE_COMP__SHIFT))
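/* Vertex buffers tracked as constant are not set up as vertex arrays at all;
 * the single attribute value is unpacked on the CPU and written directly
 * through VTX_ATTR_DEFINE, using the VTX_ATTR() encoding above (attribute
 * index, component count, type and size).
 */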
static void
nvc0_set_constant_vertex_attrib(struct nvc0_context *nvc0, const unsigned a)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[a].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   uint32_t mode;
   const struct util_format_description *desc;
   void *dst;
   const void *src = (const uint8_t *)vb->buffer.user + ve->src_offset;
   assert(vb->is_user_buffer);

   desc = util_format_description(ve->src_format);

   PUSH_SPACE(push, 6);
   BEGIN_NVC0(push, NVC0_3D(VTX_ATTR_DEFINE), 5);
   dst = &push->cur[1];
   if (desc->channel[0].pure_integer) {
      if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED) {
         mode = VTX_ATTR(a, 4, SINT, 32);
         desc->unpack_rgba_sint(dst, 0, src, 0, 1, 1);
      } else {
         mode = VTX_ATTR(a, 4, UINT, 32);
         desc->unpack_rgba_uint(dst, 0, src, 0, 1, 1);
      }
   } else {
      mode = VTX_ATTR(a, 4, FLOAT, 32);
      desc->unpack_rgba_float(dst, 0, src, 0, 1, 1);
   }
   push->cur[0] = mode;
   push->cur += 5;
}
static inline void
nvc0_user_vbuf_range(struct nvc0_context *nvc0, int vbi,
                     uint32_t *base, uint32_t *size)
{
   if (unlikely(nvc0->vertex->instance_bufs & (1 << vbi))) {
      const uint32_t div = nvc0->vertex->min_instance_div[vbi];
      *base = nvc0->instance_off * nvc0->vtxbuf[vbi].stride;
      *size = (nvc0->instance_max / div) * nvc0->vtxbuf[vbi].stride +
         nvc0->vertex->vb_access_size[vbi];
   } else {
      /* NOTE: if there are user buffers, we *must* have index bounds */
      assert(nvc0->vb_elt_limit != ~0);
      *base = nvc0->vb_elt_first * nvc0->vtxbuf[vbi].stride;
      *size = nvc0->vb_elt_limit * nvc0->vtxbuf[vbi].stride +
         nvc0->vertex->vb_access_size[vbi];
   }
}
static void
nvc0_release_user_vbufs(struct nvc0_context *nvc0)
{
   if (nvc0->vbo_user) {
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   }
}
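/* User (CPU-side) vertex buffers are staged through the nouveau scratch
 * allocator: the accessed range is copied into GART-visible scratch memory
 * and the resulting address/limit pair is programmed per element through the
 * VERTEX_ARRAY_SELECT macro. nvc0_release_user_vbufs() above drops the
 * temporary buffer references again after the draw.
 */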
static void
nvc0_update_user_vbufs(struct nvc0_context *nvc0)
{
   uint64_t address[PIPE_MAX_ATTRIBS];
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   int i;
   uint32_t written = 0;

   PUSH_SPACE(push, nvc0->vertex->num_elements * 8);
   for (i = 0; i < nvc0->vertex->num_elements; ++i) {
      struct pipe_vertex_element *ve = &nvc0->vertex->element[i].pipe;
      const unsigned b = ve->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[b];
      uint32_t base, size;

      if (!(nvc0->vbo_user & (1 << b)))
         continue;
      if (nvc0->constant_vbos & (1 << b)) {
         nvc0_set_constant_vertex_attrib(nvc0, i);
         continue;
      }
      nvc0_user_vbuf_range(nvc0, b, &base, &size);

      if (!(written & (1 << b))) {
         struct nouveau_bo *bo;
         const uint32_t bo_flags = NOUVEAU_BO_RD | NOUVEAU_BO_GART;
         written |= 1 << b;
         address[b] = nouveau_scratch_data(&nvc0->base, vb->buffer.user,
                                           base, size, &bo);
         if (bo)
            BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, bo_flags, bo);

         NOUVEAU_DRV_STAT(&nvc0->screen->base, user_buffer_upload_bytes, size);
      }

      BEGIN_1IC0(push, NVC0_3D(MACRO_VERTEX_ARRAY_SELECT), 5);
      PUSH_DATA (push, i);
      PUSH_DATAh(push, address[b] + base + size - 1);
      PUSH_DATA (push, address[b] + base + size - 1);
      PUSH_DATAh(push, address[b] + ve->src_offset);
      PUSH_DATA (push, address[b] + ve->src_offset);
   }
   nvc0->base.vbo_dirty = true;
}
static void
nvc0_update_user_vbufs_shared(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   uint32_t mask = nvc0->vbo_user & ~nvc0->constant_vbos;

   PUSH_SPACE(push, nvc0->num_vtxbufs * 8);
   while (mask) {
      struct nouveau_bo *bo;
      const uint32_t bo_flags = NOUVEAU_BO_RD | NOUVEAU_BO_GART;
      uint64_t address;
      uint32_t base, size;
      const int b = ffs(mask) - 1;
      mask &= ~(1 << b);

      nvc0_user_vbuf_range(nvc0, b, &base, &size);

      address = nouveau_scratch_data(&nvc0->base, nvc0->vtxbuf[b].buffer.user,
                                     base, size, &bo);
      if (bo)
         BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, bo_flags, bo);

      BEGIN_1IC0(push, NVC0_3D(MACRO_VERTEX_ARRAY_SELECT), 5);
      PUSH_DATA (push, b);
      PUSH_DATAh(push, address + base + size - 1);
      PUSH_DATA (push, address + base + size - 1);
      PUSH_DATAh(push, address);
      PUSH_DATA (push, address);

      NOUVEAU_DRV_STAT(&nvc0->screen->base, user_buffer_upload_bytes, size);
   }

   mask = nvc0->state.constant_elts;
   while (mask) {
      int i = ffs(mask) - 1;
      mask &= ~(1 << i);
      nvc0_set_constant_vertex_attrib(nvc0, i);
   }
}
static void
nvc0_validate_vertex_buffers(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   const struct nvc0_vertex_stateobj *vertex = nvc0->vertex;
   uint32_t refd = 0;
   unsigned i;

   PUSH_SPACE(push, vertex->num_elements * 8);
   for (i = 0; i < vertex->num_elements; ++i) {
      const struct nvc0_vertex_element *ve;
      const struct pipe_vertex_buffer *vb;
      struct nv04_resource *res;
      unsigned b;
      unsigned limit, offset;

      if (nvc0->state.constant_elts & (1 << i))
         continue;
      ve = &vertex->element[i];
      b = ve->pipe.vertex_buffer_index;
      vb = &nvc0->vtxbuf[b];

      if (nvc0->vbo_user & (1 << b)) {
         if (!(nvc0->constant_vbos & (1 << b))) {
            if (ve->pipe.instance_divisor) {
               BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_DIVISOR(i)), 1);
               PUSH_DATA (push, ve->pipe.instance_divisor);
            }
            BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 1);
            PUSH_DATA (push, (1 << 12) | vb->stride);
         }
         /* address/value set in nvc0_update_user_vbufs */
         continue;
      }
      res = nv04_resource(vb->buffer.resource);
      offset = ve->pipe.src_offset + vb->buffer_offset;
      limit = vb->buffer.resource->width0 - 1;

      if (unlikely(ve->pipe.instance_divisor)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 4);
         PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | vb->stride);
         PUSH_DATAh(push, res->address + offset);
         PUSH_DATA (push, res->address + offset);
         PUSH_DATA (push, ve->pipe.instance_divisor);
      } else {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 3);
         PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | vb->stride);
         PUSH_DATAh(push, res->address + offset);
         PUSH_DATA (push, res->address + offset);
      }
      BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(i)), 2);
      PUSH_DATAh(push, res->address + limit);
      PUSH_DATA (push, res->address + limit);

      if (!(refd & (1 << b))) {
         refd |= 1 << b;
         BCTX_REFN(nvc0->bufctx_3d, 3D_VTX, res, RD);
      }
   }
   if (nvc0->vbo_user)
      nvc0_update_user_vbufs(nvc0);
}
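/* "Shared slots" variant: when the attribute format words already encode the
 * buffer index and source offset (see nvc0_vertex_state_create), the vertex
 * arrays can be programmed per buffer instead of per element.
 */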
static void
nvc0_validate_vertex_buffers_shared(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned b;
   const uint32_t mask = nvc0->vbo_user;

   PUSH_SPACE(push, nvc0->num_vtxbufs * 8 + nvc0->vertex->num_elements);
   for (b = 0; b < nvc0->num_vtxbufs; ++b) {
      struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[b];
      struct nv04_resource *buf;
      uint32_t offset, limit;

      if (mask & (1 << b)) {
         if (!(nvc0->constant_vbos & (1 << b))) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(b)), 1);
            PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | vb->stride);
         }
         /* address/value set in nvc0_update_user_vbufs_shared */
         continue;
      } else if (!vb->buffer.resource) {
         /* there can be holes in the vertex buffer lists */
         IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(b)), 0);
         continue;
      }
      buf = nv04_resource(vb->buffer.resource);
      offset = vb->buffer_offset;
      limit = buf->base.width0 - 1;

      BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(b)), 3);
      PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | vb->stride);
      PUSH_DATAh(push, buf->address + offset);
      PUSH_DATA (push, buf->address + offset);
      BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(b)), 2);
      PUSH_DATAh(push, buf->address + limit);
      PUSH_DATA (push, buf->address + limit);

      BCTX_REFN(nvc0->bufctx_3d, 3D_VTX, buf, RD);
   }
   /* If there are more elements than buffers, we might not have unset
    * fetching on the later elements.
    */
   for (; b < nvc0->vertex->num_elements; ++b)
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(b)), 0);

   if (nvc0->vbo_user)
      nvc0_update_user_vbufs_shared(nvc0);
}
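/* vbo_mode selects how vertex data reaches the GPU for subsequent draws:
 * 0 fetches directly from GPU-accessible vertex buffers, 1 pushes translated
 * vertex data through the FIFO (used for user buffers on the push hint), and
 * 3 forces the translate fallback (format conversion or edge flags needed).
 */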
void
nvc0_vertex_arrays_validate(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_vertex_stateobj *vertex = nvc0->vertex;
   struct nvc0_vertex_element *ve;
   uint32_t const_vbos;
   unsigned i;
   uint8_t vbo_mode;
   bool update_vertex;

   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX);

   if (unlikely(vertex->need_conversion) ||
       unlikely(nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS)) {
      vbo_mode = 3;
   } else if (nvc0->vbo_user & ~nvc0->constant_vbos) {
      vbo_mode = nvc0->vbo_push_hint ? 1 : 0;
   } else {
      vbo_mode = 0;
   }
   const_vbos = vbo_mode ? 0 : nvc0->constant_vbos;

   update_vertex = (nvc0->dirty_3d & NVC0_NEW_3D_VERTEX) ||
      (const_vbos != nvc0->state.constant_vbos) ||
      (vbo_mode != nvc0->state.vbo_mode);

   if (update_vertex) {
      const unsigned n = MAX2(vertex->num_elements, nvc0->state.num_vtxelts);

      nvc0->state.constant_vbos = const_vbos;
      nvc0->state.constant_elts = 0;
      nvc0->state.num_vtxelts = vertex->num_elements;
      nvc0->state.vbo_mode = vbo_mode;

      if (unlikely(vbo_mode)) {
         if (unlikely(nvc0->state.instance_elts & 3)) {
            /* translate mode uses only 2 vertex buffers */
            nvc0->state.instance_elts &= ~3;
            PUSH_SPACE(push, 3);
            BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(0)), 2);
            PUSH_DATA (push, 0);
            PUSH_DATA (push, 0);
         }

         PUSH_SPACE(push, n * 2 + 4);

         BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(0)), n);
         for (i = 0; i < vertex->num_elements; ++i)
            PUSH_DATA(push, vertex->element[i].state_alt);
         for (; i < n; ++i)
            PUSH_DATA(push, NVC0_3D_VERTEX_ATTRIB_INACTIVE);

         BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(0)), 1);
         PUSH_DATA (push, (1 << 12) | vertex->size);
         for (i = 1; i < n; ++i)
            IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 0);
      } else {
         uint32_t *restrict data;

         if (unlikely(vertex->instance_elts != nvc0->state.instance_elts)) {
            nvc0->state.instance_elts = vertex->instance_elts;
            assert(n); /* if (n == 0), both masks should be 0 */
            PUSH_SPACE(push, 3);
            BEGIN_NVC0(push, NVC0_3D(MACRO_VERTEX_ARRAY_PER_INSTANCE), 2);
            PUSH_DATA (push, n);
            PUSH_DATA (push, vertex->instance_elts);
         }

         PUSH_SPACE(push, n * 2 + 1);
         BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(0)), n);
         data = push->cur;
         push->cur += n;
         for (i = 0; i < vertex->num_elements; ++i) {
            ve = &vertex->element[i];
            data[i] = ve->state;
            if (unlikely(const_vbos & (1 << ve->pipe.vertex_buffer_index))) {
               nvc0->state.constant_elts |= 1 << i;
               data[i] |= NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST;
               IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 0);
            }
         }
         for (; i < n; ++i) {
            data[i] = NVC0_3D_VERTEX_ATTRIB_INACTIVE;
            IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 0);
         }
      }
   }
   if (nvc0->state.vbo_mode) /* using translate, don't set up arrays here */
      return;

   if (vertex->shared_slots)
      nvc0_validate_vertex_buffers_shared(nvc0);
   else
      nvc0_validate_vertex_buffers(nvc0);
}
void
nvc0_idxbuf_validate(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);

   assert(buf);
   assert(nouveau_resource_mapped_by_gpu(&buf->base));

   PUSH_SPACE(push, 6);
   BEGIN_NVC0(push, NVC0_3D(INDEX_ARRAY_START_HIGH), 5);
   PUSH_DATAh(push, buf->address + nvc0->idxbuf.offset);
   PUSH_DATA (push, buf->address + nvc0->idxbuf.offset);
   PUSH_DATAh(push, buf->address + buf->base.width0 - 1);
   PUSH_DATA (push, buf->address + buf->base.width0 - 1);
   PUSH_DATA (push, nvc0->idxbuf.index_size >> 1);

   BCTX_REFN(nvc0->bufctx_3d, 3D_IDX, buf, RD);
}
#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

static inline unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(PATCHES);
   default:
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}
static void
nvc0_draw_vbo_kick_notify(struct nouveau_pushbuf *push)
{
   struct nvc0_screen *screen = push->user_priv;

   nouveau_fence_update(&screen->base, true);

   NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1);
}
static void
nvc0_draw_arrays(struct nvc0_context *nvc0,
                 unsigned mode, unsigned start, unsigned count,
                 unsigned instance_count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned prim;

   if (nvc0->state.index_bias) {
      /* index_bias is implied 0 if !info->indexed (really ?) */
      /* TODO: can we deactivate it for the VERTEX_BUFFER_FIRST command ? */
      PUSH_SPACE(push, 2);
      IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0);
      IMMED_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   prim = nvc0_prim_gl(mode);

   while (instance_count--) {
      PUSH_SPACE(push, 6);
      BEGIN_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (push, prim);
      BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
      PUSH_DATA (push, start);
      PUSH_DATA (push, count);
      IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);

      prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
   }
   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_array, 1);
}
static void
nvc0_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 3) {
      unsigned i;
      PUSH_SPACE(push, 4);
      BEGIN_NIC0(push, NVC0_3D(VB_ELEMENT_U32), count & 3);
      for (i = 0; i < (count & 3); ++i)
         PUSH_DATA(push, *map++);
      count &= ~3;
   }
   while (count) {
      unsigned i, nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN * 4) / 4;

      PUSH_SPACE(push, nr + 1);
      BEGIN_NIC0(push, NVC0_3D(VB_ELEMENT_U8), nr);
      for (i = 0; i < nr; ++i) {
         PUSH_DATA(push,
                   (map[3] << 24) | (map[2] << 16) | (map[1] << 8) | map[0]);
         map += 4;
      }
      count -= nr * 4;
   }
}
static void
nvc0_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      count &= ~1;
      PUSH_SPACE(push, 2);
      BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }
   while (count) {
      unsigned i, nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN * 2) / 2;

      PUSH_SPACE(push, nr + 1);
      BEGIN_NIC0(push, NVC0_3D(VB_ELEMENT_U16), nr);
      for (i = 0; i < nr; ++i) {
         PUSH_DATA(push, (map[1] << 16) | map[0]);
         map += 2;
      }
      count -= nr * 2;
   }
}
static void
nvc0_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   while (count) {
      const unsigned nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);

      PUSH_SPACE(push, nr + 1);
      BEGIN_NIC0(push, NVC0_3D(VB_ELEMENT_U32), nr);
      PUSH_DATAp(push, map, nr);

      map += nr;
      count -= nr;
   }
}
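/* Variant of the u32 path for when every index fits in 16 bits: pairs of
 * 32-bit indices are packed into single VB_ELEMENT_U16 words, halving the
 * amount of data pushed.
 */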
static void
nvc0_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
                                    const uint32_t *map,
                                    unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      count--;
      PUSH_SPACE(push, 2);
      BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }
   while (count) {
      unsigned i, nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN * 2) / 2;

      PUSH_SPACE(push, nr + 1);
      BEGIN_NIC0(push, NVC0_3D(VB_ELEMENT_U16), nr);
      for (i = 0; i < nr; ++i) {
         PUSH_DATA(push, (map[1] << 16) | map[0]);
         map += 2;
      }
      count -= nr * 2;
   }
}
static void
nvc0_draw_elements(struct nvc0_context *nvc0, bool shorten,
                   unsigned mode, unsigned start, unsigned count,
                   unsigned instance_count, int32_t index_bias)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   unsigned prim;
   const unsigned index_size = nvc0->idxbuf.index_size;

   prim = nvc0_prim_gl(mode);

   if (index_bias != nvc0->state.index_bias) {
      PUSH_SPACE(push, 4);
      BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 1);
      PUSH_DATA (push, index_bias);
      BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 1);
      PUSH_DATA (push, index_bias);
      nvc0->state.index_bias = index_bias;
   }

   if (nvc0->idxbuf.buffer) {
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), prim);
      do {
         PUSH_SPACE(push, 7);
         BEGIN_NVC0(push, NVC0_3D(INDEX_BATCH_FIRST), 2);
         PUSH_DATA (push, start);
         PUSH_DATA (push, count);
         if (--instance_count) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_END_GL), 2);
            PUSH_DATA (push, 0);
            PUSH_DATA (push, prim | NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT);
         }
      } while (instance_count);
      IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);
   } else {
      const void *data = nvc0->idxbuf.user_buffer;

      while (instance_count--) {
         PUSH_SPACE(push, 2);
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), 1);
         PUSH_DATA (push, prim);
         switch (index_size) {
         case 1:
            nvc0_draw_elements_inline_u08(push, data, start, count);
            break;
         case 2:
            nvc0_draw_elements_inline_u16(push, data, start, count);
            break;
         case 4:
            if (shorten)
               nvc0_draw_elements_inline_u32_short(push, data, start, count);
            else
               nvc0_draw_elements_inline_u32(push, data, start, count);
            break;
         default:
            assert(0);
            return;
         }
         IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);

         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
      }
   }
   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_indexed, 1);
}
static void
nvc0_draw_stream_output(struct nvc0_context *nvc0,
                        const struct pipe_draw_info *info)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_so_target *so = nvc0_so_target(info->count_from_stream_output);
   struct nv04_resource *res = nv04_resource(so->pipe.buffer);
   unsigned mode = nvc0_prim_gl(info->mode);
   unsigned num_instances = info->instance_count;

   if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      PUSH_SPACE(push, 2);
      IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
      nvc0_hw_query_fifo_wait(nvc0, nvc0_query(so->pq));
      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);

      NOUVEAU_DRV_STAT(&nvc0->screen->base, gpu_serialize_count, 1);
   }

   while (num_instances--) {
      nouveau_pushbuf_space(push, 16, 0, 1);
      BEGIN_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (push, mode);
      BEGIN_NVC0(push, NVC0_3D(DRAW_TFB_BASE), 1);
      PUSH_DATA (push, 0);
      BEGIN_NVC0(push, NVC0_3D(DRAW_TFB_STRIDE), 1);
      PUSH_DATA (push, so->stride);
      BEGIN_NVC0(push, NVC0_3D(DRAW_TFB_BYTES), 1);
      nvc0_hw_query_pushbuf_submit(push, nvc0_query(so->pq), 0x4);
      IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);

      mode |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
   }
}
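/* Indirect draws are executed by pushbuf macros; the draw parameters are
 * appended as NO_PREFETCH IB entries referencing the indirect (and optional
 * draw-count) buffer directly, so the values are read by the GPU at command
 * execution time rather than copied at submit time.
 */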
static void
nvc0_draw_indirect(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nv04_resource *buf = nv04_resource(info->indirect->buffer);
   struct nv04_resource *buf_count = nv04_resource(info->indirect->indirect_draw_count);
   unsigned size, macro, count = info->indirect->draw_count, drawid = info->drawid;
   uint32_t offset = buf->offset + info->indirect->offset;
   struct nvc0_screen *screen = nvc0->screen;

   PUSH_SPACE(push, 7);

   /* must make FIFO wait for engines idle before continuing to process */
   if ((buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr)) ||
       (buf_count && buf_count->fence_wr &&
        !nouveau_fence_signalled(buf_count->fence_wr))) {
      IMMED_NVC0(push, SUBC_3D(NV10_SUBCHAN_REF_CNT), 0);
   }

   /* Queue things up to let the macros write params to the driver constbuf */
   BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
   PUSH_DATA (push, NVC0_CB_AUX_SIZE);
   PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
   PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
   BEGIN_NVC0(push, NVC0_3D(CB_POS), 1);
   PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO);

   if (info->indexed) {
      assert(nvc0->idxbuf.buffer);
      assert(nouveau_resource_mapped_by_gpu(nvc0->idxbuf.buffer));
      size = 5;
      if (buf_count)
         macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT_COUNT;
      else
         macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT;
   } else {
      if (nvc0->state.index_bias) {
         /* index_bias is implied 0 if !info->indexed (really ?) */
         IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0);
         IMMED_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 0);
         nvc0->state.index_bias = 0;
      }
      size = 4;
      if (buf_count)
         macro = NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT_COUNT;
      else
         macro = NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT;
   }

   /* If the stride is not the natural stride, we have to stick a separate
    * push data reference for each draw. Otherwise it can all go in as one.
    * Of course there is a maximum packet size, so we have to break things up
    * along those borders as well.
    */
   while (count) {
      unsigned draws = count, pushes, i;
      if (info->indirect->stride == size * 4) {
         draws = MIN2(draws, (NV04_PFIFO_MAX_PACKET_LEN - 4) / size);
         pushes = 1;
      } else {
         draws = MIN2(draws, 32);
         pushes = draws;
      }

      nouveau_pushbuf_space(push, 16, 0, pushes + !!buf_count);
      PUSH_REFN(push, buf->bo, NOUVEAU_BO_RD | buf->domain);
      if (buf_count)
         PUSH_REFN(push, buf_count->bo, NOUVEAU_BO_RD | buf_count->domain);
      PUSH_DATA(push,
                NVC0_FIFO_PKHDR_1I(0, macro, 3 + !!buf_count + draws * size));
      PUSH_DATA(push, nvc0_prim_gl(info->mode));
      PUSH_DATA(push, drawid);
      PUSH_DATA(push, draws);
      if (buf_count) {
         nouveau_pushbuf_data(push,
                              buf_count->bo,
                              buf_count->offset + info->indirect->indirect_draw_count_offset,
                              NVC0_IB_ENTRY_1_NO_PREFETCH | 4);
      }
      if (pushes == 1) {
         nouveau_pushbuf_data(push,
                              buf->bo, offset,
                              NVC0_IB_ENTRY_1_NO_PREFETCH | (size * 4 * draws));
         offset += draws * info->indirect->stride;
      } else {
         for (i = 0; i < pushes; i++) {
            nouveau_pushbuf_data(push,
                                 buf->bo, offset,
                                 NVC0_IB_ENTRY_1_NO_PREFETCH | (size * 4));
            offset += info->indirect->stride;
         }
      }
      count -= draws;
      drawid += draws;
   }
}
static inline void
nvc0_update_prim_restart(struct nvc0_context *nvc0, bool en, uint32_t index)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;

   if (en != nvc0->state.prim_restart) {
      if (en) {
         BEGIN_NVC0(push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
         PUSH_DATA (push, 1);
         PUSH_DATA (push, index);
      } else {
         IMMED_NVC0(push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
      }
      nvc0->state.prim_restart = en;
   } else
   if (en) {
      BEGIN_NVC0(push, NVC0_3D(PRIM_RESTART_INDEX), 1);
      PUSH_DATA (push, index);
   }
}
void
nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_screen *screen = nvc0->screen;
   int s;

   /* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
   nvc0->vb_elt_first = info->min_index + info->index_bias;
   nvc0->vb_elt_limit = info->max_index - info->min_index;
   nvc0->instance_off = info->start_instance;
   nvc0->instance_max = info->instance_count - 1;

   /* For picking only a few vertices from a large user buffer, push is better,
    * if index count is larger and we expect repeated vertices, suggest upload.
    */
   nvc0->vbo_push_hint =
      !info->indirect && info->indexed &&
      (nvc0->vb_elt_limit >= (info->count * 2));

   /* Check whether we want to switch vertex-submission mode. */
   if (nvc0->vbo_user && !(nvc0->dirty_3d & (NVC0_NEW_3D_ARRAYS | NVC0_NEW_3D_VERTEX))) {
      if (nvc0->vbo_push_hint != !!nvc0->state.vbo_mode)
         if (nvc0->state.vbo_mode != 3)
            nvc0->dirty_3d |= NVC0_NEW_3D_ARRAYS;

      if (!(nvc0->dirty_3d & NVC0_NEW_3D_ARRAYS) && nvc0->state.vbo_mode == 0) {
         if (nvc0->vertex->shared_slots)
            nvc0_update_user_vbufs_shared(nvc0);
         else
            nvc0_update_user_vbufs(nvc0);
      }
   }

   if (info->mode == PIPE_PRIM_PATCHES &&
       nvc0->state.patch_vertices != info->vertices_per_patch) {
      nvc0->state.patch_vertices = info->vertices_per_patch;
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(PATCH_VERTICES), nvc0->state.patch_vertices);
   }

   nvc0_state_validate_3d(nvc0, ~0);

   if (nvc0->vertprog->vp.need_draw_parameters && !info->indirect) {
      PUSH_SPACE(push, 9);
      BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
      PUSH_DATA (push, NVC0_CB_AUX_SIZE);
      PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
      PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
      BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 3);
      PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO);
      PUSH_DATA (push, info->index_bias);
      PUSH_DATA (push, info->start_instance);
      PUSH_DATA (push, info->drawid);
   }

   if (nvc0->screen->base.class_3d < NVE4_3D_CLASS &&
       nvc0->seamless_cube_map != nvc0->state.seamless_cube_map) {
      nvc0->state.seamless_cube_map = nvc0->seamless_cube_map;
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(TEX_MISC),
                 nvc0->seamless_cube_map ? NVC0_3D_TEX_MISC_SEAMLESS_CUBE_MAP : 0);
   }

   push->kick_notify = nvc0_draw_vbo_kick_notify;

   for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
      if (nvc0->constbuf_coherent[s])
         nvc0->cb_dirty = true;
   }

   if (nvc0->cb_dirty) {
      PUSH_SPACE(push, 1);
      IMMED_NVC0(push, NVC0_3D(MEM_BARRIER), 0x1011);
      nvc0->cb_dirty = false;
   }

   for (s = 0; s < 5; ++s) {
      if (!nvc0->textures_coherent[s])
         continue;

      PUSH_SPACE(push, nvc0->num_textures[s] * 2);

      for (int i = 0; i < nvc0->num_textures[s]; ++i) {
         struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
         if (!(nvc0->textures_coherent[s] & (1 << i)))
            continue;

         BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
         PUSH_DATA (push, (tic->id << 4) | 1);
         NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_cache_flush_count, 1);
      }
   }

   if (nvc0->state.vbo_mode) {
      nvc0_push_vbo(nvc0, info);
      push->kick_notify = nvc0_default_kick_notify;
      nouveau_pushbuf_bufctx(push, NULL);
      return;
   }

   /* space for base instance, flush, and prim restart */
   PUSH_SPACE(push, 8);

   if (nvc0->state.instance_base != info->start_instance) {
      nvc0->state.instance_base = info->start_instance;
      /* NOTE: this does not affect the shader input, should it ? */
      BEGIN_NVC0(push, NVC0_3D(VB_INSTANCE_BASE), 1);
      PUSH_DATA (push, info->start_instance);
   }

   nvc0->base.vbo_dirty |= !!nvc0->vtxbufs_coherent;

   if (!nvc0->base.vbo_dirty && nvc0->idxbuf.buffer &&
       nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
      nvc0->base.vbo_dirty = true;

   nvc0_update_prim_restart(nvc0, info->primitive_restart, info->restart_index);

   if (nvc0->base.vbo_dirty) {
      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      nvc0->base.vbo_dirty = false;
   }

   if (unlikely(info->indirect)) {
      nvc0_draw_indirect(nvc0, info);
   } else
   if (unlikely(info->count_from_stream_output)) {
      nvc0_draw_stream_output(nvc0, info);
   } else
   if (info->indexed) {
      bool shorten = info->max_index <= 65535;

      if (info->primitive_restart && info->restart_index > 65535)
         shorten = false;

      nvc0_draw_elements(nvc0, shorten,
                         info->mode, info->start, info->count,
                         info->instance_count, info->index_bias);
   } else {
      nvc0_draw_arrays(nvc0,
                       info->mode, info->start, info->count,
                       info->instance_count);
   }
   push->kick_notify = nvc0_default_kick_notify;

   nvc0_release_user_vbufs(nvc0);

   nouveau_pushbuf_bufctx(push, NULL);
}