2 #include "pipe/p_context.h"
3 #include "pipe/p_state.h"
4 #include "util/u_inlines.h"
5 #include "util/u_format.h"
6 #include "translate/translate.h"
8 #include "nvc0/nvc0_context.h"
9 #include "nvc0/nvc0_resource.h"
11 #include "nvc0/nvc0_3d.xml.h"
/* State carried through one CPU-translated (push) draw: the target push
 * buffer, the translate object that converts vertex data, the scratch
 * destination, index-buffer mapping, and edgeflag bookkeeping.
 * NOTE(review): field grouping/blank-line placement reconstructed from the
 * surrounding accesses — confirm against upstream nvc0_push.c.
 */
struct push_context {
   struct nouveau_pushbuf *push;

   struct translate *translate;

   void *dest;          /* scratch VBO write cursor */
   const void *idxbuf;  /* mapped index buffer (or user pointer) */

   uint32_t vertex_size;
   uint32_t restart_index;
   uint32_t start_instance;
   uint32_t instance_id;

   bool prim_restart;
   bool need_vertex_id;

   struct {
      bool enabled;
      bool value;           /* current edgeflag state */
      uint8_t width;        /* attribute blocksize: 1 or 4 bytes */
      unsigned stride;
      const uint8_t *data;  /* mapped edgeflag attribute stream */
   } edgeflag;
};

static void nvc0_push_upload_vertex_ids(struct push_context *,
                                        struct nvc0_context *,
                                        const struct pipe_draw_info *);
42 nvc0_push_context_init(struct nvc0_context
*nvc0
, struct push_context
*ctx
)
44 ctx
->push
= nvc0
->base
.pushbuf
;
46 ctx
->translate
= nvc0
->vertex
->translate
;
47 ctx
->vertex_size
= nvc0
->vertex
->size
;
51 nvc0
->vertprog
->vp
.need_vertex_id
&& (nvc0
->vertex
->num_elements
< 32);
53 ctx
->edgeflag
.value
= true;
54 ctx
->edgeflag
.enabled
= nvc0
->vertprog
->vp
.edgeflag
< PIPE_MAX_ATTRIBS
;
56 /* silence warnings */
57 ctx
->edgeflag
.data
= NULL
;
58 ctx
->edgeflag
.stride
= 0;
59 ctx
->edgeflag
.width
= 0;
63 nvc0_vertex_configure_translate(struct nvc0_context
*nvc0
, int32_t index_bias
)
65 struct translate
*translate
= nvc0
->vertex
->translate
;
68 for (i
= 0; i
< nvc0
->num_vtxbufs
; ++i
) {
70 const struct pipe_vertex_buffer
*vb
= &nvc0
->vtxbuf
[i
];
72 if (likely(!vb
->buffer
))
73 map
= (const uint8_t *)vb
->user_buffer
;
75 map
= nouveau_resource_map_offset(&nvc0
->base
,
76 nv04_resource(vb
->buffer
), vb
->buffer_offset
, NOUVEAU_BO_RD
);
78 if (index_bias
&& !unlikely(nvc0
->vertex
->instance_bufs
& (1 << i
)))
79 map
+= (intptr_t)index_bias
* vb
->stride
;
81 translate
->set_buffer(translate
, i
, map
, vb
->stride
, ~0);
86 nvc0_push_map_idxbuf(struct push_context
*ctx
, struct nvc0_context
*nvc0
)
88 if (nvc0
->idxbuf
.buffer
) {
89 struct nv04_resource
*buf
= nv04_resource(nvc0
->idxbuf
.buffer
);
90 ctx
->idxbuf
= nouveau_resource_map_offset(&nvc0
->base
,
91 buf
, nvc0
->idxbuf
.offset
, NOUVEAU_BO_RD
);
93 ctx
->idxbuf
= nvc0
->idxbuf
.user_buffer
;
98 nvc0_push_map_edgeflag(struct push_context
*ctx
, struct nvc0_context
*nvc0
,
101 unsigned attr
= nvc0
->vertprog
->vp
.edgeflag
;
102 struct pipe_vertex_element
*ve
= &nvc0
->vertex
->element
[attr
].pipe
;
103 struct pipe_vertex_buffer
*vb
= &nvc0
->vtxbuf
[ve
->vertex_buffer_index
];
104 struct nv04_resource
*buf
= nv04_resource(vb
->buffer
);
106 ctx
->edgeflag
.stride
= vb
->stride
;
107 ctx
->edgeflag
.width
= util_format_get_blocksize(ve
->src_format
);
109 unsigned offset
= vb
->buffer_offset
+ ve
->src_offset
;
110 ctx
->edgeflag
.data
= nouveau_resource_map_offset(&nvc0
->base
,
111 buf
, offset
, NOUVEAU_BO_RD
);
113 ctx
->edgeflag
.data
= (const uint8_t *)vb
->user_buffer
+ ve
->src_offset
;
117 ctx
->edgeflag
.data
+= (intptr_t)index_bias
* vb
->stride
;
/* Return the number of leading 8-bit indices in elts (at most push) that
 * are not the primitive restart index.
 */
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}
/* Return the number of leading 16-bit indices in elts (at most push) that
 * are not the primitive restart index.
 */
static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}
/* Return the number of leading 32-bit indices in elts (at most push) that
 * are not the primitive restart index.
 */
static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}
145 ef_value_8(const struct push_context
*ctx
, uint32_t index
)
147 uint8_t *pf
= (uint8_t *)&ctx
->edgeflag
.data
[index
* ctx
->edgeflag
.stride
];
152 ef_value_32(const struct push_context
*ctx
, uint32_t index
)
154 uint32_t *pf
= (uint32_t *)&ctx
->edgeflag
.data
[index
* ctx
->edgeflag
.stride
];
159 ef_toggle(struct push_context
*ctx
)
161 ctx
->edgeflag
.value
= !ctx
->edgeflag
.value
;
162 return ctx
->edgeflag
.value
;
165 static inline unsigned
166 ef_toggle_search_i08(struct push_context
*ctx
, const uint8_t *elts
, unsigned n
)
169 bool ef
= ctx
->edgeflag
.value
;
170 if (ctx
->edgeflag
.width
== 1)
171 for (i
= 0; i
< n
&& ef_value_8(ctx
, elts
[i
]) == ef
; ++i
);
173 for (i
= 0; i
< n
&& ef_value_32(ctx
, elts
[i
]) == ef
; ++i
);
177 static inline unsigned
178 ef_toggle_search_i16(struct push_context
*ctx
, const uint16_t *elts
, unsigned n
)
181 bool ef
= ctx
->edgeflag
.value
;
182 if (ctx
->edgeflag
.width
== 1)
183 for (i
= 0; i
< n
&& ef_value_8(ctx
, elts
[i
]) == ef
; ++i
);
185 for (i
= 0; i
< n
&& ef_value_32(ctx
, elts
[i
]) == ef
; ++i
);
189 static inline unsigned
190 ef_toggle_search_i32(struct push_context
*ctx
, const uint32_t *elts
, unsigned n
)
193 bool ef
= ctx
->edgeflag
.value
;
194 if (ctx
->edgeflag
.width
== 1)
195 for (i
= 0; i
< n
&& ef_value_8(ctx
, elts
[i
]) == ef
; ++i
);
197 for (i
= 0; i
< n
&& ef_value_32(ctx
, elts
[i
]) == ef
; ++i
);
201 static inline unsigned
202 ef_toggle_search_seq(struct push_context
*ctx
, unsigned start
, unsigned n
)
205 bool ef
= ctx
->edgeflag
.value
;
206 if (ctx
->edgeflag
.width
== 1)
207 for (i
= 0; i
< n
&& ef_value_8(ctx
, start
++) == ef
; ++i
);
209 for (i
= 0; i
< n
&& ef_value_32(ctx
, start
++) == ef
; ++i
);
214 nvc0_push_setup_vertex_array(struct nvc0_context
*nvc0
, const unsigned count
)
216 struct nouveau_pushbuf
*push
= nvc0
->base
.pushbuf
;
217 struct nouveau_bo
*bo
;
219 const unsigned size
= count
* nvc0
->vertex
->size
;
221 void *const dest
= nouveau_scratch_get(&nvc0
->base
, size
, &va
, &bo
);
223 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
224 PUSH_DATAh(push
, va
);
225 PUSH_DATA (push
, va
);
226 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
227 PUSH_DATAh(push
, va
+ size
- 1);
228 PUSH_DATA (push
, va
+ size
- 1);
230 BCTX_REFN_bo(nvc0
->bufctx_3d
, 3D_VTX_TMP
, NOUVEAU_BO_GART
| NOUVEAU_BO_RD
,
232 nouveau_pushbuf_validate(push
);
238 disp_vertices_i08(struct push_context
*ctx
, unsigned start
, unsigned count
)
240 struct nouveau_pushbuf
*push
= ctx
->push
;
241 struct translate
*translate
= ctx
->translate
;
242 const uint8_t *restrict elts
= (uint8_t *)ctx
->idxbuf
+ start
;
248 if (unlikely(ctx
->prim_restart
))
249 nR
= prim_restart_search_i08(elts
, nR
, ctx
->restart_index
);
251 translate
->run_elts8(translate
, elts
, nR
,
252 ctx
->start_instance
, ctx
->instance_id
, ctx
->dest
);
254 ctx
->dest
+= nR
* ctx
->vertex_size
;
259 if (unlikely(ctx
->edgeflag
.enabled
))
260 nE
= ef_toggle_search_i08(ctx
, elts
, nR
);
263 if (likely(nE
>= 2)) {
264 BEGIN_NVC0(push
, NVC0_3D(VERTEX_BUFFER_FIRST
), 2);
265 PUSH_DATA (push
, pos
);
266 PUSH_DATA (push
, nE
);
270 IMMED_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), pos
);
272 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
273 PUSH_DATA (push
, pos
);
276 if (unlikely(nE
!= nR
))
277 IMMED_NVC0(push
, NVC0_3D(EDGEFLAG
), ef_toggle(ctx
));
284 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
285 PUSH_DATA (push
, 0xffffffff);
287 ctx
->dest
+= ctx
->vertex_size
;
295 disp_vertices_i16(struct push_context
*ctx
, unsigned start
, unsigned count
)
297 struct nouveau_pushbuf
*push
= ctx
->push
;
298 struct translate
*translate
= ctx
->translate
;
299 const uint16_t *restrict elts
= (uint16_t *)ctx
->idxbuf
+ start
;
305 if (unlikely(ctx
->prim_restart
))
306 nR
= prim_restart_search_i16(elts
, nR
, ctx
->restart_index
);
308 translate
->run_elts16(translate
, elts
, nR
,
309 ctx
->start_instance
, ctx
->instance_id
, ctx
->dest
);
311 ctx
->dest
+= nR
* ctx
->vertex_size
;
316 if (unlikely(ctx
->edgeflag
.enabled
))
317 nE
= ef_toggle_search_i16(ctx
, elts
, nR
);
320 if (likely(nE
>= 2)) {
321 BEGIN_NVC0(push
, NVC0_3D(VERTEX_BUFFER_FIRST
), 2);
322 PUSH_DATA (push
, pos
);
323 PUSH_DATA (push
, nE
);
327 IMMED_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), pos
);
329 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
330 PUSH_DATA (push
, pos
);
333 if (unlikely(nE
!= nR
))
334 IMMED_NVC0(push
, NVC0_3D(EDGEFLAG
), ef_toggle(ctx
));
341 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
342 PUSH_DATA (push
, 0xffffffff);
344 ctx
->dest
+= ctx
->vertex_size
;
352 disp_vertices_i32(struct push_context
*ctx
, unsigned start
, unsigned count
)
354 struct nouveau_pushbuf
*push
= ctx
->push
;
355 struct translate
*translate
= ctx
->translate
;
356 const uint32_t *restrict elts
= (uint32_t *)ctx
->idxbuf
+ start
;
362 if (unlikely(ctx
->prim_restart
))
363 nR
= prim_restart_search_i32(elts
, nR
, ctx
->restart_index
);
365 translate
->run_elts(translate
, elts
, nR
,
366 ctx
->start_instance
, ctx
->instance_id
, ctx
->dest
);
368 ctx
->dest
+= nR
* ctx
->vertex_size
;
373 if (unlikely(ctx
->edgeflag
.enabled
))
374 nE
= ef_toggle_search_i32(ctx
, elts
, nR
);
377 if (likely(nE
>= 2)) {
378 BEGIN_NVC0(push
, NVC0_3D(VERTEX_BUFFER_FIRST
), 2);
379 PUSH_DATA (push
, pos
);
380 PUSH_DATA (push
, nE
);
384 IMMED_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), pos
);
386 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
387 PUSH_DATA (push
, pos
);
390 if (unlikely(nE
!= nR
))
391 IMMED_NVC0(push
, NVC0_3D(EDGEFLAG
), ef_toggle(ctx
));
398 BEGIN_NVC0(push
, NVC0_3D(VB_ELEMENT_U32
), 1);
399 PUSH_DATA (push
, 0xffffffff);
401 ctx
->dest
+= ctx
->vertex_size
;
409 disp_vertices_seq(struct push_context
*ctx
, unsigned start
, unsigned count
)
411 struct nouveau_pushbuf
*push
= ctx
->push
;
412 struct translate
*translate
= ctx
->translate
;
415 /* XXX: This will read the data corresponding to the primitive restart index,
416 * maybe we should avoid that ?
418 translate
->run(translate
, start
, count
,
419 ctx
->start_instance
, ctx
->instance_id
, ctx
->dest
);
423 if (unlikely(ctx
->edgeflag
.enabled
))
424 nr
= ef_toggle_search_seq(ctx
, start
+ pos
, nr
);
428 BEGIN_NVC0(push
, NVC0_3D(VERTEX_BUFFER_FIRST
), 2);
429 PUSH_DATA (push
, pos
);
430 PUSH_DATA (push
, nr
);
432 if (unlikely(nr
!= count
))
433 IMMED_NVC0(push
, NVC0_3D(EDGEFLAG
), ef_toggle(ctx
));
441 #define NVC0_PRIM_GL_CASE(n) \
442 case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n
444 static inline unsigned
445 nvc0_prim_gl(unsigned prim
)
448 NVC0_PRIM_GL_CASE(POINTS
);
449 NVC0_PRIM_GL_CASE(LINES
);
450 NVC0_PRIM_GL_CASE(LINE_LOOP
);
451 NVC0_PRIM_GL_CASE(LINE_STRIP
);
452 NVC0_PRIM_GL_CASE(TRIANGLES
);
453 NVC0_PRIM_GL_CASE(TRIANGLE_STRIP
);
454 NVC0_PRIM_GL_CASE(TRIANGLE_FAN
);
455 NVC0_PRIM_GL_CASE(QUADS
);
456 NVC0_PRIM_GL_CASE(QUAD_STRIP
);
457 NVC0_PRIM_GL_CASE(POLYGON
);
458 NVC0_PRIM_GL_CASE(LINES_ADJACENCY
);
459 NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY
);
460 NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY
);
461 NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY
);
462 NVC0_PRIM_GL_CASE(PATCHES
);
464 return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS
;
469 nvc0_push_vbo(struct nvc0_context
*nvc0
, const struct pipe_draw_info
*info
)
471 struct push_context ctx
;
472 unsigned i
, index_size
;
473 unsigned inst_count
= info
->instance_count
;
474 unsigned vert_count
= info
->count
;
477 nvc0_push_context_init(nvc0
, &ctx
);
479 nvc0_vertex_configure_translate(nvc0
, info
->index_bias
);
481 if (nvc0
->state
.index_bias
) {
482 /* this is already taken care of by translate */
483 IMMED_NVC0(ctx
.push
, NVC0_3D(VB_ELEMENT_BASE
), 0);
484 nvc0
->state
.index_bias
= 0;
487 if (unlikely(ctx
.edgeflag
.enabled
))
488 nvc0_push_map_edgeflag(&ctx
, nvc0
, info
->index_bias
);
490 ctx
.prim_restart
= info
->primitive_restart
;
491 ctx
.restart_index
= info
->restart_index
;
493 if (info
->primitive_restart
) {
494 /* NOTE: I hope we won't ever need that last index (~0).
495 * If we do, we have to disable primitive restart here always and
496 * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
497 * We could also deactive PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
498 * and add manual restart to disp_vertices_seq.
500 BEGIN_NVC0(ctx
.push
, NVC0_3D(PRIM_RESTART_ENABLE
), 2);
501 PUSH_DATA (ctx
.push
, 1);
502 PUSH_DATA (ctx
.push
, info
->indexed
? 0xffffffff : info
->restart_index
);
504 if (nvc0
->state
.prim_restart
) {
505 IMMED_NVC0(ctx
.push
, NVC0_3D(PRIM_RESTART_ENABLE
), 0);
507 nvc0
->state
.prim_restart
= info
->primitive_restart
;
510 nvc0_push_map_idxbuf(&ctx
, nvc0
);
511 index_size
= nvc0
->idxbuf
.index_size
;
513 if (unlikely(info
->count_from_stream_output
)) {
514 struct pipe_context
*pipe
= &nvc0
->base
.pipe
;
515 struct nvc0_so_target
*targ
;
516 targ
= nvc0_so_target(info
->count_from_stream_output
);
517 pipe
->get_query_result(pipe
, targ
->pq
, true, (void *)&vert_count
);
518 vert_count
/= targ
->stride
;
520 ctx
.idxbuf
= NULL
; /* shut up warnings */
524 ctx
.start_instance
= info
->start_instance
;
526 prim
= nvc0_prim_gl(info
->mode
);
528 PUSH_SPACE(ctx
.push
, 9);
530 ctx
.dest
= nvc0_push_setup_vertex_array(nvc0
, vert_count
);
531 if (unlikely(!ctx
.dest
))
534 if (unlikely(ctx
.need_vertex_id
))
535 nvc0_push_upload_vertex_ids(&ctx
, nvc0
, info
);
537 if (nvc0
->screen
->eng3d
->oclass
< GM107_3D_CLASS
)
538 IMMED_NVC0(ctx
.push
, NVC0_3D(VERTEX_ARRAY_FLUSH
), 0);
539 BEGIN_NVC0(ctx
.push
, NVC0_3D(VERTEX_BEGIN_GL
), 1);
540 PUSH_DATA (ctx
.push
, prim
);
541 switch (index_size
) {
543 disp_vertices_i08(&ctx
, info
->start
, vert_count
);
546 disp_vertices_i16(&ctx
, info
->start
, vert_count
);
549 disp_vertices_i32(&ctx
, info
->start
, vert_count
);
552 assert(index_size
== 0);
553 disp_vertices_seq(&ctx
, info
->start
, vert_count
);
556 PUSH_SPACE(ctx
.push
, 1);
557 IMMED_NVC0(ctx
.push
, NVC0_3D(VERTEX_END_GL
), 0);
560 prim
|= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT
;
563 nouveau_bufctx_reset(nvc0
->bufctx_3d
, NVC0_BIND_3D_VTX_TMP
);
564 nouveau_scratch_done(&nvc0
->base
);
565 } while (inst_count
);
568 /* reset state and unmap buffers (no-op) */
570 if (unlikely(!ctx
.edgeflag
.value
)) {
571 PUSH_SPACE(ctx
.push
, 1);
572 IMMED_NVC0(ctx
.push
, NVC0_3D(EDGEFLAG
), 1);
575 if (unlikely(ctx
.need_vertex_id
)) {
576 PUSH_SPACE(ctx
.push
, 4);
577 IMMED_NVC0(ctx
.push
, NVC0_3D(VERTEX_ID_REPLACE
), 0);
578 BEGIN_NVC0(ctx
.push
, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
580 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST
|
581 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT
|
582 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32
);
583 IMMED_NVC0(ctx
.push
, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
587 nouveau_resource_unmap(nv04_resource(nvc0
->idxbuf
.buffer
));
588 for (i
= 0; i
< nvc0
->num_vtxbufs
; ++i
)
589 nouveau_resource_unmap(nv04_resource(nvc0
->vtxbuf
[i
].buffer
));
591 NOUVEAU_DRV_STAT(&nvc0
->screen
->base
, draw_calls_fallback_count
, 1);
/* Widen n 8-bit indices to 32 bits, adding 'bias' to each. */
static void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}
/* Widen n 16-bit indices to 32 bits, adding 'bias' to each. */
static void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}
/* Copy n 32-bit indices, adding 'bias' to each. */
static void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}
619 nvc0_push_upload_vertex_ids(struct push_context
*ctx
,
620 struct nvc0_context
*nvc0
,
621 const struct pipe_draw_info
*info
)
624 struct nouveau_pushbuf
*push
= ctx
->push
;
625 struct nouveau_bo
*bo
;
629 unsigned index_size
= nvc0
->idxbuf
.index_size
;
631 unsigned a
= nvc0
->vertex
->num_elements
;
633 if (!index_size
|| info
->index_bias
)
635 data
= (uint32_t *)nouveau_scratch_get(&nvc0
->base
,
636 info
->count
* index_size
, &va
, &bo
);
638 BCTX_REFN_bo(nvc0
->bufctx_3d
, 3D_VTX_TMP
, NOUVEAU_BO_GART
| NOUVEAU_BO_RD
,
640 nouveau_pushbuf_validate(push
);
643 if (!info
->index_bias
) {
644 memcpy(data
, ctx
->idxbuf
, info
->count
* index_size
);
646 switch (nvc0
->idxbuf
.index_size
) {
648 copy_indices_u8(data
, ctx
->idxbuf
, info
->index_bias
, info
->count
);
651 copy_indices_u16(data
, ctx
->idxbuf
, info
->index_bias
, info
->count
);
654 copy_indices_u32(data
, ctx
->idxbuf
, info
->index_bias
, info
->count
);
659 for (i
= 0; i
< info
->count
; ++i
)
660 data
[i
] = i
+ (info
->start
+ info
->index_bias
);
663 format
= (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT
) |
664 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT
;
666 switch (index_size
) {
668 format
|= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8
;
671 format
|= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16
;
674 format
|= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32
;
678 PUSH_SPACE(push
, 12);
680 if (unlikely(nvc0
->state
.instance_elts
& 2)) {
681 nvc0
->state
.instance_elts
&= ~2;
682 IMMED_NVC0(push
, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
685 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ATTRIB_FORMAT(a
)), 1);
686 PUSH_DATA (push
, format
);
688 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
689 PUSH_DATA (push
, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE
| index_size
);
690 PUSH_DATAh(push
, va
);
691 PUSH_DATA (push
, va
);
692 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
693 PUSH_DATAh(push
, va
+ info
->count
* index_size
- 1);
694 PUSH_DATA (push
, va
+ info
->count
* index_size
- 1);
696 #define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
697 (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)
699 BEGIN_NVC0(push
, NVC0_3D(VERTEX_ID_REPLACE
), 1);
700 PUSH_DATA (push
, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a
) | 1);