#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "translate/translate.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"

#include "nvc0/nvc0_3d.xml.h"
/* Per-draw state for the CPU vertex-push fallback path: vertices are
 * translated on the CPU into a scratch buffer and drawn from there.
 * Members beyond those visible here are reconstructed from their uses
 * in this file (dest/idxbuf/vertex_size/instance_id/prim_restart/
 * need_vertex_id and the edgeflag sub-struct).
 */
struct push_context {
   struct nouveau_pushbuf *push;

   struct translate *translate;
   void *dest;          /* write cursor into the scratch vertex buffer */
   const void *idxbuf;  /* mapped index buffer (or user pointer), NULL if none */

   uint32_t vertex_size;
   uint32_t restart_index;
   uint32_t start_instance;
   uint32_t instance_id;

   bool prim_restart;
   bool need_vertex_id;

   /* Source attribute carrying the edge flag, mapped for CPU reads. */
   struct {
      bool enabled;
      bool value;            /* current EDGEFLAG state programmed on HW */
      uint8_t width;         /* blocksize of the attribute format (1 or 4) */
      unsigned stride;
      const uint8_t *data;
   } edgeflag;
};
/* Forward declaration: uploads a per-vertex ID stream (defined below). */
static void nvc0_push_upload_vertex_ids(struct push_context *,
                                        struct nvc0_context *,
                                        const struct pipe_draw_info *);
42 nvc0_push_context_init(struct nvc0_context
*nvc0
, struct push_context
*ctx
)
44 ctx
->push
= nvc0
->base
.pushbuf
;
46 ctx
->translate
= nvc0
->vertex
->translate
;
47 ctx
->vertex_size
= nvc0
->vertex
->size
;
51 nvc0
->vertprog
->vp
.need_vertex_id
&& (nvc0
->vertex
->num_elements
< 32);
53 ctx
->edgeflag
.value
= true;
54 ctx
->edgeflag
.enabled
= nvc0
->vertprog
->vp
.edgeflag
< PIPE_MAX_ATTRIBS
;
56 /* silence warnings */
57 ctx
->edgeflag
.data
= NULL
;
58 ctx
->edgeflag
.stride
= 0;
59 ctx
->edgeflag
.width
= 0;
63 nvc0_vertex_configure_translate(struct nvc0_context
*nvc0
, int32_t index_bias
)
65 struct translate
*translate
= nvc0
->vertex
->translate
;
68 for (i
= 0; i
< nvc0
->num_vtxbufs
; ++i
) {
70 const struct pipe_vertex_buffer
*vb
= &nvc0
->vtxbuf
[i
];
72 if (likely(vb
->is_user_buffer
))
73 map
= (const uint8_t *)vb
->buffer
.user
;
75 map
= nouveau_resource_map_offset(&nvc0
->base
,
76 nv04_resource(vb
->buffer
.resource
), vb
->buffer_offset
, NOUVEAU_BO_RD
);
78 if (index_bias
&& !unlikely(nvc0
->vertex
->instance_bufs
& (1 << i
)))
79 map
+= (intptr_t)index_bias
* vb
->stride
;
81 translate
->set_buffer(translate
, i
, map
, vb
->stride
, ~0);
86 nvc0_push_map_idxbuf(struct push_context
*ctx
, struct nvc0_context
*nvc0
,
87 const struct pipe_draw_info
*info
,
90 if (!info
->has_user_indices
) {
91 struct nv04_resource
*buf
= nv04_resource(info
->index
.resource
);
92 ctx
->idxbuf
= nouveau_resource_map_offset(&nvc0
->base
,
93 buf
, offset
, NOUVEAU_BO_RD
);
95 ctx
->idxbuf
= info
->index
.user
;
100 nvc0_push_map_edgeflag(struct push_context
*ctx
, struct nvc0_context
*nvc0
,
103 unsigned attr
= nvc0
->vertprog
->vp
.edgeflag
;
104 struct pipe_vertex_element
*ve
= &nvc0
->vertex
->element
[attr
].pipe
;
105 struct pipe_vertex_buffer
*vb
= &nvc0
->vtxbuf
[ve
->vertex_buffer_index
];
106 struct nv04_resource
*buf
= nv04_resource(vb
->buffer
.resource
);
108 ctx
->edgeflag
.stride
= vb
->stride
;
109 ctx
->edgeflag
.width
= util_format_get_blocksize(ve
->src_format
);
110 if (!vb
->is_user_buffer
) {
111 unsigned offset
= vb
->buffer_offset
+ ve
->src_offset
;
112 ctx
->edgeflag
.data
= nouveau_resource_map_offset(&nvc0
->base
,
113 buf
, offset
, NOUVEAU_BO_RD
);
115 ctx
->edgeflag
.data
= (const uint8_t *)vb
->buffer
.user
+ ve
->src_offset
;
119 ctx
->edgeflag
.data
+= (intptr_t)index_bias
* vb
->stride
;
/* Return the count of leading 8-bit indices in elts[0..push) that are not
 * equal to the primitive restart index. */
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}
/* Return the count of leading 16-bit indices in elts[0..push) that are not
 * equal to the primitive restart index. */
static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}
/* Return the count of leading 32-bit indices in elts[0..push) that are not
 * equal to the primitive restart index. */
static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}
147 ef_value_8(const struct push_context
*ctx
, uint32_t index
)
149 uint8_t *pf
= (uint8_t *)&ctx
->edgeflag
.data
[index
* ctx
->edgeflag
.stride
];
154 ef_value_32(const struct push_context
*ctx
, uint32_t index
)
156 uint32_t *pf
= (uint32_t *)&ctx
->edgeflag
.data
[index
* ctx
->edgeflag
.stride
];
161 ef_toggle(struct push_context
*ctx
)
163 ctx
->edgeflag
.value
= !ctx
->edgeflag
.value
;
164 return ctx
->edgeflag
.value
;
167 static inline unsigned
168 ef_toggle_search_i08(struct push_context
*ctx
, const uint8_t *elts
, unsigned n
)
171 bool ef
= ctx
->edgeflag
.value
;
172 if (ctx
->edgeflag
.width
== 1)
173 for (i
= 0; i
< n
&& ef_value_8(ctx
, elts
[i
]) == ef
; ++i
);
175 for (i
= 0; i
< n
&& ef_value_32(ctx
, elts
[i
]) == ef
; ++i
);
179 static inline unsigned
180 ef_toggle_search_i16(struct push_context
*ctx
, const uint16_t *elts
, unsigned n
)
183 bool ef
= ctx
->edgeflag
.value
;
184 if (ctx
->edgeflag
.width
== 1)
185 for (i
= 0; i
< n
&& ef_value_8(ctx
, elts
[i
]) == ef
; ++i
);
187 for (i
= 0; i
< n
&& ef_value_32(ctx
, elts
[i
]) == ef
; ++i
);
191 static inline unsigned
192 ef_toggle_search_i32(struct push_context
*ctx
, const uint32_t *elts
, unsigned n
)
195 bool ef
= ctx
->edgeflag
.value
;
196 if (ctx
->edgeflag
.width
== 1)
197 for (i
= 0; i
< n
&& ef_value_8(ctx
, elts
[i
]) == ef
; ++i
);
199 for (i
= 0; i
< n
&& ef_value_32(ctx
, elts
[i
]) == ef
; ++i
);
203 static inline unsigned
204 ef_toggle_search_seq(struct push_context
*ctx
, unsigned start
, unsigned n
)
207 bool ef
= ctx
->edgeflag
.value
;
208 if (ctx
->edgeflag
.width
== 1)
209 for (i
= 0; i
< n
&& ef_value_8(ctx
, start
++) == ef
; ++i
);
211 for (i
= 0; i
< n
&& ef_value_32(ctx
, start
++) == ef
; ++i
);
216 nvc0_push_setup_vertex_array(struct nvc0_context
*nvc0
, const unsigned count
)
218 struct nouveau_pushbuf
*push
= nvc0
->base
.pushbuf
;
219 struct nouveau_bo
*bo
;
221 const unsigned size
= count
* nvc0
->vertex
->size
;
223 void *const dest
= nouveau_scratch_get(&nvc0
->base
, size
, &va
, &bo
);
225 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
226 PUSH_DATAh(push
, va
);
227 PUSH_DATA (push
, va
);
228 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
229 PUSH_DATAh(push
, va
+ size
- 1);
230 PUSH_DATA (push
, va
+ size
- 1);
232 BCTX_REFN_bo(nvc0
->bufctx_3d
, 3D_VTX_TMP
, NOUVEAU_BO_GART
| NOUVEAU_BO_RD
,
234 nouveau_pushbuf_validate(push
);
240 disp_vertices_i08(struct push_context
*ctx
, unsigned start
, unsigned count
)
242 struct nouveau_pushbuf
*push
= ctx
->push
;
243 struct translate
*translate
= ctx
->translate
;
244 const uint8_t *restrict elts
= (uint8_t *)ctx
->idxbuf
+ start
;
250 if (unlikely(ctx
->prim_restart
))
251 nR
= prim_restart_search_i08(elts
, nR
, ctx
->restart_index
);
253 translate
->run_elts8(translate
, elts
, nR
,
254 ctx
->start_instance
, ctx
->instance_id
, ctx
->dest
);
256 ctx
->dest
+= nR
* ctx
->vertex_size
;
261 if (unlikely(ctx
->edgeflag
.enabled
))
262 nE
= ef_toggle_search_i08(ctx
, elts
, nR
);
265 if (likely(nE
>= 2)) {
266 BEGIN_NVC0(push
, NVC0_3D(VERTEX_BUFFER_FIRST
), 2);
267 PUSH_DATA (push
, pos
);
268 PUSH_DATA (push
, nE
);
272 IMMED_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), pos
);
274 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
275 PUSH_DATA (push
, pos
);
278 if (unlikely(nE
!= nR
))
279 IMMED_NVC0(push
, NVC0_3D(EDGEFLAG
), ef_toggle(ctx
));
286 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
287 PUSH_DATA (push
, 0xffffffff);
289 ctx
->dest
+= ctx
->vertex_size
;
297 disp_vertices_i16(struct push_context
*ctx
, unsigned start
, unsigned count
)
299 struct nouveau_pushbuf
*push
= ctx
->push
;
300 struct translate
*translate
= ctx
->translate
;
301 const uint16_t *restrict elts
= (uint16_t *)ctx
->idxbuf
+ start
;
307 if (unlikely(ctx
->prim_restart
))
308 nR
= prim_restart_search_i16(elts
, nR
, ctx
->restart_index
);
310 translate
->run_elts16(translate
, elts
, nR
,
311 ctx
->start_instance
, ctx
->instance_id
, ctx
->dest
);
313 ctx
->dest
+= nR
* ctx
->vertex_size
;
318 if (unlikely(ctx
->edgeflag
.enabled
))
319 nE
= ef_toggle_search_i16(ctx
, elts
, nR
);
322 if (likely(nE
>= 2)) {
323 BEGIN_NVC0(push
, NVC0_3D(VERTEX_BUFFER_FIRST
), 2);
324 PUSH_DATA (push
, pos
);
325 PUSH_DATA (push
, nE
);
329 IMMED_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), pos
);
331 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
332 PUSH_DATA (push
, pos
);
335 if (unlikely(nE
!= nR
))
336 IMMED_NVC0(push
, NVC0_3D(EDGEFLAG
), ef_toggle(ctx
));
343 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
344 PUSH_DATA (push
, 0xffffffff);
346 ctx
->dest
+= ctx
->vertex_size
;
354 disp_vertices_i32(struct push_context
*ctx
, unsigned start
, unsigned count
)
356 struct nouveau_pushbuf
*push
= ctx
->push
;
357 struct translate
*translate
= ctx
->translate
;
358 const uint32_t *restrict elts
= (uint32_t *)ctx
->idxbuf
+ start
;
364 if (unlikely(ctx
->prim_restart
))
365 nR
= prim_restart_search_i32(elts
, nR
, ctx
->restart_index
);
367 translate
->run_elts(translate
, elts
, nR
,
368 ctx
->start_instance
, ctx
->instance_id
, ctx
->dest
);
370 ctx
->dest
+= nR
* ctx
->vertex_size
;
375 if (unlikely(ctx
->edgeflag
.enabled
))
376 nE
= ef_toggle_search_i32(ctx
, elts
, nR
);
379 if (likely(nE
>= 2)) {
380 BEGIN_NVC0(push
, NVC0_3D(VERTEX_BUFFER_FIRST
), 2);
381 PUSH_DATA (push
, pos
);
382 PUSH_DATA (push
, nE
);
386 IMMED_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), pos
);
388 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
389 PUSH_DATA (push
, pos
);
392 if (unlikely(nE
!= nR
))
393 IMMED_NVC0(push
, NVC0_3D(EDGEFLAG
), ef_toggle(ctx
));
400 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
401 PUSH_DATA (push
, 0xffffffff);
403 ctx
->dest
+= ctx
->vertex_size
;
411 disp_vertices_seq(struct push_context
*ctx
, unsigned start
, unsigned count
)
413 struct nouveau_pushbuf
*push
= ctx
->push
;
414 struct translate
*translate
= ctx
->translate
;
417 /* XXX: This will read the data corresponding to the primitive restart index,
418 * maybe we should avoid that ?
420 translate
->run(translate
, start
, count
,
421 ctx
->start_instance
, ctx
->instance_id
, ctx
->dest
);
425 if (unlikely(ctx
->edgeflag
.enabled
))
426 nr
= ef_toggle_search_seq(ctx
, start
+ pos
, nr
);
430 BEGIN_NVC0(push
, NVC0_3D(VERTEX_BUFFER_FIRST
), 2);
431 PUSH_DATA (push
, pos
);
432 PUSH_DATA (push
, nr
);
434 if (unlikely(nr
!= count
))
435 IMMED_NVC0(push
, NVC0_3D(EDGEFLAG
), ef_toggle(ctx
));
443 #define NVC0_PRIM_GL_CASE(n) \
444 case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n
446 static inline unsigned
447 nvc0_prim_gl(unsigned prim
)
450 NVC0_PRIM_GL_CASE(POINTS
);
451 NVC0_PRIM_GL_CASE(LINES
);
452 NVC0_PRIM_GL_CASE(LINE_LOOP
);
453 NVC0_PRIM_GL_CASE(LINE_STRIP
);
454 NVC0_PRIM_GL_CASE(TRIANGLES
);
455 NVC0_PRIM_GL_CASE(TRIANGLE_STRIP
);
456 NVC0_PRIM_GL_CASE(TRIANGLE_FAN
);
457 NVC0_PRIM_GL_CASE(QUADS
);
458 NVC0_PRIM_GL_CASE(QUAD_STRIP
);
459 NVC0_PRIM_GL_CASE(POLYGON
);
460 NVC0_PRIM_GL_CASE(LINES_ADJACENCY
);
461 NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY
);
462 NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY
);
463 NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY
);
464 NVC0_PRIM_GL_CASE(PATCHES
);
466 return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS
;
471 nvc0_push_vbo(struct nvc0_context
*nvc0
, const struct pipe_draw_info
*info
)
473 struct push_context ctx
;
474 unsigned i
, index_size
;
475 unsigned inst_count
= info
->instance_count
;
476 unsigned vert_count
= info
->count
;
479 nvc0_push_context_init(nvc0
, &ctx
);
481 nvc0_vertex_configure_translate(nvc0
, info
->index_bias
);
483 if (nvc0
->state
.index_bias
) {
484 /* this is already taken care of by translate */
485 IMMED_NVC0(ctx
.push
, NVC0_3D(VB_ELEMENT_BASE
), 0);
486 nvc0
->state
.index_bias
= 0;
489 if (unlikely(ctx
.edgeflag
.enabled
))
490 nvc0_push_map_edgeflag(&ctx
, nvc0
, info
->index_bias
);
492 ctx
.prim_restart
= info
->primitive_restart
;
493 ctx
.restart_index
= info
->restart_index
;
495 if (info
->primitive_restart
) {
496 /* NOTE: I hope we won't ever need that last index (~0).
497 * If we do, we have to disable primitive restart here always and
498 * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
499 * We could also deactive PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
500 * and add manual restart to disp_vertices_seq.
502 BEGIN_NVC0(ctx
.push
, NVC0_3D(PRIM_RESTART_ENABLE
), 2);
503 PUSH_DATA (ctx
.push
, 1);
504 PUSH_DATA (ctx
.push
, info
->index_size
? 0xffffffff : info
->restart_index
);
506 if (nvc0
->state
.prim_restart
) {
507 IMMED_NVC0(ctx
.push
, NVC0_3D(PRIM_RESTART_ENABLE
), 0);
509 nvc0
->state
.prim_restart
= info
->primitive_restart
;
511 if (info
->index_size
) {
512 nvc0_push_map_idxbuf(&ctx
, nvc0
, info
, info
->start
* info
->index_size
);
513 index_size
= info
->index_size
;
515 if (unlikely(info
->count_from_stream_output
)) {
516 struct pipe_context
*pipe
= &nvc0
->base
.pipe
;
517 struct nvc0_so_target
*targ
;
518 targ
= nvc0_so_target(info
->count_from_stream_output
);
519 pipe
->get_query_result(pipe
, targ
->pq
, true, (void *)&vert_count
);
520 vert_count
/= targ
->stride
;
522 ctx
.idxbuf
= NULL
; /* shut up warnings */
526 ctx
.start_instance
= info
->start_instance
;
528 prim
= nvc0_prim_gl(info
->mode
);
530 PUSH_SPACE(ctx
.push
, 9);
532 ctx
.dest
= nvc0_push_setup_vertex_array(nvc0
, vert_count
);
533 if (unlikely(!ctx
.dest
))
536 if (unlikely(ctx
.need_vertex_id
))
537 nvc0_push_upload_vertex_ids(&ctx
, nvc0
, info
);
539 if (nvc0
->screen
->eng3d
->oclass
< GM107_3D_CLASS
)
540 IMMED_NVC0(ctx
.push
, NVC0_3D(VERTEX_ARRAY_FLUSH
), 0);
541 BEGIN_NVC0(ctx
.push
, NVC0_3D(VERTEX_BEGIN_GL
), 1);
542 PUSH_DATA (ctx
.push
, prim
);
543 switch (index_size
) {
545 disp_vertices_i08(&ctx
, info
->start
, vert_count
);
548 disp_vertices_i16(&ctx
, info
->start
, vert_count
);
551 disp_vertices_i32(&ctx
, info
->start
, vert_count
);
554 assert(index_size
== 0);
555 disp_vertices_seq(&ctx
, info
->start
, vert_count
);
558 PUSH_SPACE(ctx
.push
, 1);
559 IMMED_NVC0(ctx
.push
, NVC0_3D(VERTEX_END_GL
), 0);
562 prim
|= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT
;
565 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_VTX_TMP
);
566 nouveau_scratch_done(&nvc0
->base
);
567 } while (inst_count
);
570 /* reset state and unmap buffers (no-op) */
572 if (unlikely(!ctx
.edgeflag
.value
)) {
573 PUSH_SPACE(ctx
.push
, 1);
574 IMMED_NVC0(ctx
.push
, NVC0_3D(EDGEFLAG
), 1);
577 if (unlikely(ctx
.need_vertex_id
)) {
578 PUSH_SPACE(ctx
.push
, 4);
579 IMMED_NVC0(ctx
.push
, NVC0_3D(VERTEX_ID_REPLACE
), 0);
580 BEGIN_NVC0(ctx
.push
, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
582 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST
|
583 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT
|
584 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32
);
585 IMMED_NVC0(ctx
.push
, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
588 if (info
->index_size
&& !info
->has_user_indices
)
589 nouveau_resource_unmap(nv04_resource(info
->index
.resource
));
590 for (i
= 0; i
< nvc0
->num_vtxbufs
; ++i
)
591 nouveau_resource_unmap(nv04_resource(nvc0
->vtxbuf
[i
].buffer
.resource
));
593 NOUVEAU_DRV_STAT(&nvc0
->screen
->base
, draw_calls_fallback_count
, 1);
/* Widen 8-bit indices to 32 bits while adding the index bias. */
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}
/* Widen 16-bit indices to 32 bits while adding the index bias. */
static inline void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}
/* Copy 32-bit indices while adding the index bias. */
static inline void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}
621 nvc0_push_upload_vertex_ids(struct push_context
*ctx
,
622 struct nvc0_context
*nvc0
,
623 const struct pipe_draw_info
*info
)
626 struct nouveau_pushbuf
*push
= ctx
->push
;
627 struct nouveau_bo
*bo
;
631 unsigned index_size
= info
->index_size
;
633 unsigned a
= nvc0
->vertex
->num_elements
;
635 if (!index_size
|| info
->index_bias
)
637 data
= (uint32_t *)nouveau_scratch_get(&nvc0
->base
,
638 info
->count
* index_size
, &va
, &bo
);
640 BCTX_REFN_bo(nvc0
->bufctx_3d
, 3D_VTX_TMP
, NOUVEAU_BO_GART
| NOUVEAU_BO_RD
,
642 nouveau_pushbuf_validate(push
);
644 if (info
->index_size
) {
645 if (!info
->index_bias
) {
646 memcpy(data
, ctx
->idxbuf
, info
->count
* index_size
);
648 switch (info
->index_size
) {
650 copy_indices_u8(data
, ctx
->idxbuf
, info
->index_bias
, info
->count
);
653 copy_indices_u16(data
, ctx
->idxbuf
, info
->index_bias
, info
->count
);
656 copy_indices_u32(data
, ctx
->idxbuf
, info
->index_bias
, info
->count
);
661 for (i
= 0; i
< info
->count
; ++i
)
662 data
[i
] = i
+ (info
->start
+ info
->index_bias
);
665 format
= (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT
) |
666 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT
;
668 switch (index_size
) {
670 format
|= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8
;
673 format
|= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16
;
676 format
|= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32
;
680 PUSH_SPACE(push
, 12);
682 if (unlikely(nvc0
->state
.instance_elts
& 2)) {
683 nvc0
->state
.instance_elts
&= ~2;
684 IMMED_NVC0(push
, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
687 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ATTRIB_FORMAT(a
)), 1);
688 PUSH_DATA (push
, format
);
690 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
691 PUSH_DATA (push
, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE
| index_size
);
692 PUSH_DATAh(push
, va
);
693 PUSH_DATA (push
, va
);
694 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
695 PUSH_DATAh(push
, va
+ info
->count
* index_size
- 1);
696 PUSH_DATA (push
, va
+ info
->count
* index_size
- 1);
698 #define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
699 (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)
701 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ID_REPLACE
), 1);
702 PUSH_DATA (push
, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a
) | 1);