#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "translate/translate.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"

#include "nvc0/nvc0_3d.xml.h"

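/* State shared by the CPU vertex push path below: the pushbuf to emit into,
 * the translate object that repacks vertex data, the current scratch write
 * position, the mapped index buffer, and the edge flag source (if any).
 */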
struct push_context {
   struct nouveau_pushbuf *push;

   struct translate *translate;

   void *dest;
   const void *idxbuf;

   uint32_t vertex_size;
   uint32_t restart_index;
   uint32_t start_instance;
   uint32_t instance_id;

   bool prim_restart;
   bool need_vertex_id;

   struct {
      bool enabled;
      bool value;
      uint8_t width;
      unsigned stride;
      const uint8_t *data;
   } edgeflag;
};

static void nvc0_push_upload_vertex_ids(struct push_context *,
                                        struct nvc0_context *,
                                        const struct pipe_draw_info *);

static void
nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx)
{
   ctx->push = nvc0->base.pushbuf;

   ctx->translate = nvc0->vertex->translate;
   ctx->vertex_size = nvc0->vertex->size;
   ctx->instance_id = 0;

   ctx->need_vertex_id =
      nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);

   ctx->edgeflag.value = true;
   ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;

   /* silence warnings */
   ctx->edgeflag.data = NULL;
   ctx->edgeflag.stride = 0;
   ctx->edgeflag.width = 0;
}

static void
nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
{
   struct translate *translate = nvc0->vertex->translate;
   unsigned i;

   for (i = 0; i < nvc0->num_vtxbufs; ++i) {
      const uint8_t *map;
      const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];

      if (likely(vb->is_user_buffer))
         map = (const uint8_t *)vb->buffer.user;
      else
         map = nouveau_resource_map_offset(&nvc0->base,
            nv04_resource(vb->buffer.resource), vb->buffer_offset, NOUVEAU_BO_RD);

      if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
         map += (intptr_t)index_bias * vb->stride;

      translate->set_buffer(translate, i, map, vb->stride, ~0);
   }
}

static void
nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0,
                     const struct pipe_draw_info *info)
{
   if (!info->has_user_indices) {
      struct nv04_resource *buf = nv04_resource(info->index.resource);
      ctx->idxbuf = nouveau_resource_map_offset(
            &nvc0->base, buf, 0, NOUVEAU_BO_RD);
   } else {
      ctx->idxbuf = info->index.user;
   }
}

static void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
                       int32_t index_bias)
{
   unsigned attr = nvc0->vertprog->vp.edgeflag;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   struct nv04_resource *buf = nv04_resource(vb->buffer.resource);

   ctx->edgeflag.stride = vb->stride;
   ctx->edgeflag.width = util_format_get_blocksize(ve->src_format);
   if (!vb->is_user_buffer) {
      unsigned offset = vb->buffer_offset + ve->src_offset;
      ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
                                                       buf, offset, NOUVEAU_BO_RD);
   } else {
      ctx->edgeflag.data = (const uint8_t *)vb->buffer.user + ve->src_offset;
   }

   if (index_bias)
      ctx->edgeflag.data += (intptr_t)index_bias * vb->stride;
}

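/* Return the number of indices that precede the first occurrence of the
 * primitive restart index, or `push` if the restart index does not occur.
 */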
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline bool
ef_value_8(const struct push_context *ctx, uint32_t index)
{
   uint8_t *pf = (uint8_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_value_32(const struct push_context *ctx, uint32_t index)
{
   uint32_t *pf = (uint32_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_toggle(struct push_context *ctx)
{
   ctx->edgeflag.value = !ctx->edgeflag.value;
   return ctx->edgeflag.value;
}

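/* Count how many consecutive vertices keep the current edge flag value;
 * edgeflag.width selects between 8-bit and 32-bit flag storage.
 */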
static inline unsigned
ef_toggle_search_i08(struct push_context *ctx, const uint8_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i16(struct push_context *ctx, const uint16_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i32(struct push_context *ctx, const uint32_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_seq(struct push_context *ctx, unsigned start, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, start++) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, start++) == ef; ++i);
   return i;
}

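/* Allocate scratch space for `count` translated vertices and point vertex
 * array 0 at it; returns the CPU mapping that translate will write into
 * (the caller treats NULL as allocation failure).
 */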
static void *
nvc0_push_setup_vertex_array(struct nvc0_context *nvc0, const unsigned count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo;
   uint64_t va;
   const unsigned size = count * nvc0->vertex->size;

   void *const dest = nouveau_scratch_get(&nvc0->base, size, &va, &bo);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   PUSH_DATAh(push, va + size - 1);
   PUSH_DATA (push, va + size - 1);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   return dest;
}

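/* disp_vertices_i08/_i16/_i32: translate a run of 8/16/32-bit indices into
 * the scratch vertex buffer and emit the corresponding draw ranges, splitting
 * the run at primitive restart indices and at edge flag transitions.
 */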
static void
disp_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint8_t *restrict elts = (uint8_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i08(elts, nR, ctx->restart_index);

      translate->run_elts8(translate, elts, nR,
                           ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i08(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

static void
disp_vertices_i16(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint16_t *restrict elts = (uint16_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i16(elts, nR, ctx->restart_index);

      translate->run_elts16(translate, elts, nR,
                            ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i16(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      translate->run_elts(translate, elts, nR,
                          ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

static void
disp_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   unsigned pos = 0;

   /* XXX: This will read the data corresponding to the primitive restart index,
    * maybe we should avoid that ?
    */
   translate->run(translate, start, count,
                  ctx->start_instance, ctx->instance_id, ctx->dest);
   do {
      unsigned nr = count;

      if (unlikely(ctx->edgeflag.enabled))
         nr = ef_toggle_search_seq(ctx, start + pos, nr);

      PUSH_SPACE(push, 4);
      if (likely(nr)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
         PUSH_DATA (push, pos);
         PUSH_DATA (push, nr);
      }
      if (unlikely(nr != count))
         IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

      pos += nr;
      count -= nr;
   } while (count);
}

#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

static inline unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(PATCHES);
   default:
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}

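/* Fallback draw path: instead of letting the hardware fetch the bound vertex
 * buffers, convert them with translate into a temporary scratch buffer and
 * draw from that (accounted as draw_calls_fallback_count).
 */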
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (nvc0->state.index_bias) {
      /* this is already taken care of by translate */
      IMMED_NVC0(ctx.push, NVC0_3D(VB_ELEMENT_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->primitive_restart) {
      /* NOTE: I hope we won't ever need that last index (~0).
       * If we do, we have to disable primitive restart here always and
       * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
       * We could also deactive PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
       * and add manual restart to disp_vertices_seq.
       */
      BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->index_size ? 0xffffffff : info->restart_index);
   } else
   if (nvc0->state.prim_restart) {
      IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
   }
   nvc0->state.prim_restart = info->primitive_restart;

   if (info->index_size) {
      nvc0_push_map_idxbuf(&ctx, nvc0, info);
      index_size = info->index_size;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.start_instance = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);

   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->index_size && !info->has_user_indices)
      nouveau_resource_unmap(nv04_resource(info->index.resource));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer.resource));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}

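/* Apply the index bias on the CPU while copying indices into a 32-bit
 * destination; used by nvc0_push_upload_vertex_ids below.
 */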
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

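/* Upload the vertex IDs (the biased indices, or a generated sequence for
 * non-indexed draws) to a scratch buffer and bind it as an extra vertex
 * attribute that VERTEX_ID_REPLACE sources the vertex ID from.
 */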
static void
nvc0_push_upload_vertex_ids(struct push_context *ctx,
                            struct nvc0_context *nvc0,
                            const struct pipe_draw_info *info)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct nouveau_bo *bo;
   uint64_t va;
   uint32_t *data;
   uint32_t format;
   unsigned index_size = info->index_size;
   unsigned i;
   unsigned a = nvc0->vertex->num_elements;

   if (!index_size || info->index_bias)
      index_size = 4;
   data = (uint32_t *)nouveau_scratch_get(&nvc0->base,
                                          info->count * index_size, &va, &bo);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   if (info->index_size) {
      if (!info->index_bias) {
         memcpy(data, ctx->idxbuf, info->count * index_size);
      } else {
         switch (info->index_size) {
         case 1:
            copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         case 2:
            copy_indices_u16(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         default:
            copy_indices_u32(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         }
      }
   } else {
      for (i = 0; i < info->count; ++i)
         data[i] = i + (info->start + info->index_bias);
   }

   format = (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT) |
      NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT;

   switch (index_size) {
   case 1:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8;
      break;
   case 2:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16;
      break;
   default:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32;
      break;
   }

   PUSH_SPACE(push, 12);

   if (unlikely(nvc0->state.instance_elts & 2)) {
      nvc0->state.instance_elts &= ~2;
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
   }

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
   PUSH_DATA (push, format);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
   PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | index_size);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   PUSH_DATAh(push, va + info->count * index_size - 1);
   PUSH_DATA (push, va + info->count * index_size - 1);

#define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
   (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_REPLACE), 1);
   PUSH_DATA (push, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) | 1);
}