draw/translate: fix instancing
[mesa.git] / src / gallium / drivers / nvc0 / nvc0_vbo_translate.c
1
#include <string.h>

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "translate/translate.h"

#include "nvc0_context.h"
#include "nvc0_resource.h"

#include "nvc0_3d.xml.h"
12
/* Transient state for the CPU vertex-translate fallback path: bundles the
 * pushbuf, the translate object, the scratch destination, the mapped index
 * buffer and edge flag tracking for a single draw call.
 */
struct push_context {
   struct nouveau_pushbuf *push;

   struct translate *translate;
   void *dest;            /* CPU pointer into the scratch vertex array */
   const void *idxbuf;    /* mapped index buffer (or user index pointer) */

   uint32_t vertex_size;  /* size in bytes of one translated vertex */
   uint32_t restart_index;
   uint32_t instance_id;

   boolean prim_restart;
   boolean need_vertex_id;

   struct {
      boolean enabled;
      boolean value;       /* EDGEFLAG state currently programmed */
      unsigned stride;     /* stride of the edge flag attribute */
      const uint8_t *data; /* mapped edge flag attribute data */
   } edgeflag;
};

static void nvc0_push_upload_vertex_ids(struct push_context *,
                                        struct nvc0_context *,
                                        const struct pipe_draw_info *);
38
39 static void
40 nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx)
41 {
42 ctx->push = nvc0->base.pushbuf;
43
44 ctx->translate = nvc0->vertex->translate;
45 ctx->vertex_size = nvc0->vertex->size;
46
47 ctx->need_vertex_id =
48 nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);
49
50 ctx->edgeflag.value = TRUE;
51 ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;
52
53 /* silence warnings */
54 ctx->edgeflag.data = NULL;
55 ctx->edgeflag.stride = 0;
56 }
57
/* Point the translate object at the source data of every bound vertex
 * buffer. index_bias is applied up front to per-vertex buffers (but not to
 * per-instance ones) so index values can be used unmodified later on.
 */
static INLINE void
nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
{
   struct translate *translate = nvc0->vertex->translate;
   unsigned i;

   for (i = 0; i < nvc0->num_vtxbufs; ++i) {
      const uint8_t *map;
      const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];

      /* user-memory buffers can be read directly, resources must be mapped */
      if (likely(!vb->buffer))
         map = (const uint8_t *)vb->user_buffer;
      else
         map = nouveau_resource_map_offset(&nvc0->base,
            nv04_resource(vb->buffer), vb->buffer_offset, NOUVEAU_BO_RD);

      if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
         map += (intptr_t)index_bias * vb->stride;

      translate->set_buffer(translate, i, map, vb->stride, ~0);
   }
}
80
81 static INLINE void
82 nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0)
83 {
84 if (nvc0->idxbuf.buffer) {
85 struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
86 ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
87 buf, nvc0->idxbuf.offset, NOUVEAU_BO_RD);
88 } else {
89 ctx->idxbuf = nvc0->idxbuf.user_buffer;
90 }
91 }
92
/* Map the vertex buffer that feeds the edge flag attribute so ef_value()
 * can read the flags on the CPU. index_bias is pre-applied here, matching
 * the biasing done in nvc0_vertex_configure_translate.
 */
static INLINE void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
                       int32_t index_bias)
{
   unsigned attr = nvc0->vertprog->vp.edgeflag;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   struct nv04_resource *buf = nv04_resource(vb->buffer);
   unsigned offset = vb->buffer_offset + ve->src_offset;

   ctx->edgeflag.stride = vb->stride;
   ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
                                                    buf, offset, NOUVEAU_BO_RD);
   if (index_bias)
      ctx->edgeflag.data += (intptr_t)index_bias * vb->stride;
}
109
/* Return the position of the first occurrence of the restart index in
 * elts[0..push), or push if it does not occur. For bytes this is exactly
 * memchr, which is typically vectorized by the C library.
 */
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   const uint8_t *p = memchr(elts, index, push);
   return p ? (unsigned)(p - elts) : push;
}
117
/* Return the position of the first occurrence of the restart index in
 * elts[0..push), or push if it does not occur.
 */
static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned i = 0;
   while (i < push && elts[i] != index)
      ++i;
   return i;
}
125
/* Return the position of the first occurrence of the restart index in
 * elts[0..push), or push if it does not occur.
 */
static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned i = 0;
   while (i < push && elts[i] != index)
      ++i;
   return i;
}
133
134 static INLINE boolean
135 ef_value(const struct push_context *ctx, uint32_t index)
136 {
137 float *pf = (float *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
138 return *pf ? TRUE : FALSE;
139 }
140
141 static INLINE boolean
142 ef_toggle(struct push_context *ctx)
143 {
144 ctx->edgeflag.value = !ctx->edgeflag.value;
145 return ctx->edgeflag.value;
146 }
147
148 static INLINE unsigned
149 ef_toggle_search_i08(struct push_context *ctx, const uint8_t *elts, unsigned n)
150 {
151 unsigned i;
152 for (i = 0; i < n && ef_value(ctx, elts[i]) == ctx->edgeflag.value; ++i);
153 return i;
154 }
155
156 static INLINE unsigned
157 ef_toggle_search_i16(struct push_context *ctx, const uint16_t *elts, unsigned n)
158 {
159 unsigned i;
160 for (i = 0; i < n && ef_value(ctx, elts[i]) == ctx->edgeflag.value; ++i);
161 return i;
162 }
163
164 static INLINE unsigned
165 ef_toggle_search_i32(struct push_context *ctx, const uint32_t *elts, unsigned n)
166 {
167 unsigned i;
168 for (i = 0; i < n && ef_value(ctx, elts[i]) == ctx->edgeflag.value; ++i);
169 return i;
170 }
171
172 static INLINE unsigned
173 ef_toggle_search_seq(struct push_context *ctx, unsigned start, unsigned n)
174 {
175 unsigned i;
176 for (i = 0; i < n && ef_value(ctx, start++) == ctx->edgeflag.value; ++i);
177 return i;
178 }
179
/* Allocate scratch memory for 'count' translated vertices and program the
 * hardware to fetch vertex array 0 from it. Returns the CPU pointer the
 * vertices should be written to (may be NULL on allocation failure; the
 * caller checks for that).
 */
static INLINE void *
nvc0_push_setup_vertex_array(struct nvc0_context *nvc0, const unsigned count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo;
   uint64_t va;
   const unsigned size = count * nvc0->vertex->size;

   void *const dest = nouveau_scratch_get(&nvc0->base, size, &va, &bo);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   PUSH_DATAh(push, va + size - 1);
   PUSH_DATA (push, va + size - 1);

   /* keep the scratch bo referenced while the draw uses it */
   BCTX_REFN_bo(nvc0->bufctx_3d, VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   return dest;
}
203
/* Emit a draw from an 8-bit index buffer: translate the indexed vertices
 * into the scratch vertex array and emit draw commands, splitting runs at
 * primitive restart indices and at edge flag transitions.
 */
static void
disp_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint8_t *restrict elts = (uint8_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      /* limit the run to the next primitive restart index, if any */
      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i08(elts, nR, ctx->restart_index);

      /* convert nR vertices into the scratch vertex array */
      translate->run_elts8(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         /* further split the run where the edge flag value changes */
         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i08(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* a single vertex is emitted via VB_ELEMENT_U32 instead */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         /* emit the restart index itself, then skip past it; the scratch
          * slot reserved for it is left unused
          */
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, ctx->restart_index);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
259
/* Emit a draw from a 16-bit index buffer: translate the indexed vertices
 * into the scratch vertex array and emit draw commands, splitting runs at
 * primitive restart indices and at edge flag transitions.
 */
static void
disp_vertices_i16(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint16_t *restrict elts = (uint16_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      /* limit the run to the next primitive restart index, if any */
      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i16(elts, nR, ctx->restart_index);

      /* convert nR vertices into the scratch vertex array */
      translate->run_elts16(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         /* further split the run where the edge flag value changes */
         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i16(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* a single vertex is emitted via VB_ELEMENT_U32 instead */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         /* emit the restart index itself, then skip past it; the scratch
          * slot reserved for it is left unused
          */
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, ctx->restart_index);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
315
/* Emit a draw from a 32-bit index buffer: translate the indexed vertices
 * into the scratch vertex array and emit draw commands, splitting runs at
 * primitive restart indices and at edge flag transitions.
 */
static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      /* limit the run to the next primitive restart index, if any */
      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      /* convert nR vertices into the scratch vertex array */
      translate->run_elts(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         /* further split the run where the edge flag value changes */
         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* a single vertex is emitted via VB_ELEMENT_U32 instead */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         /* emit the restart index itself, then skip past it; the scratch
          * slot reserved for it is left unused
          */
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, ctx->restart_index);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
371
/* Emit a non-indexed draw: translate the whole vertex range up front, then
 * emit VERTEX_BUFFER_FIRST/count runs, splitting only where the edge flag
 * attribute changes value.
 */
static void
disp_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   unsigned pos = 0;

   translate->run(translate, start, count, 0, ctx->instance_id, ctx->dest);
   do {
      unsigned nr = count;

      if (unlikely(ctx->edgeflag.enabled))
         nr = ef_toggle_search_seq(ctx, start + pos, nr);

      PUSH_SPACE(push, 4);
      if (likely(nr)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
         PUSH_DATA (push, pos);
         PUSH_DATA (push, nr);
      }
      /* nr != count means the search stopped at an edge flag transition */
      if (unlikely(nr != count))
         IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

      pos += nr;
      count -= nr;
   } while (count);
}
399
400
#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

/* Map a PIPE_PRIM_* primitive type to the hardware VERTEX_BEGIN_GL value;
 * unknown/unsupported types fall back to POINTS.
 */
static INLINE unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   /*
   NVC0_PRIM_GL_CASE(PATCHES); */
   default:
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}
428
/* CPU push fallback for draws: run the translate module on the CPU to
 * build a scratch vertex array and emit the draw from that, instead of
 * letting the GPU fetch the original vertex buffers directly.
 */
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->indexed) {
      nvc0_push_map_idxbuf(&ctx, nvc0);
      index_size = nvc0->idxbuf.index_size;

      /* keep the hardware primitive restart state in sync */
      if (info->primitive_restart) {
         BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
         PUSH_DATA (ctx.push, 1);
         PUSH_DATA (ctx.push, info->restart_index);
      } else
      if (nvc0->state.prim_restart) {
         IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
      }
      nvc0->state.prim_restart = info->primitive_restart;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         /* draw-auto: the vertex count comes from a stream output query */
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, TRUE, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.instance_id = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   /* one loop iteration per instance: the vertices are re-translated with
    * that instance's attribute values each time
    */
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);


   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      /* restore the state clobbered by nvc0_push_upload_vertex_ids */
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->indexed)
      nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}
541
/* Widen 8-bit indices to 32 bits while applying the index bias. */
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   const uint8_t *const end = elts + n;
   while (elts < end)
      *dst++ = *elts++ + bias;
}
549
/* Widen 16-bit indices to 32 bits while applying the index bias. */
static inline void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   const uint16_t *const end = elts + n;
   while (elts < end)
      *dst++ = *elts++ + bias;
}
557
/* Copy 32-bit indices while applying the index bias (unsigned wrap-around
 * semantics, same as the original additions).
 */
static inline void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   const uint32_t *const end = elts + n;
   while (elts < end)
      *dst++ = *elts++ + bias;
}
565
/* Upload the vertex ids (indices) for this draw into scratch memory and
 * bind them as an extra vertex attribute (slot 'num_elements', fetched
 * through vertex array 1), then point VERTEX_ID_REPLACE at that attribute.
 * nvc0_push_vbo undoes this state after the draw.
 */
static void
nvc0_push_upload_vertex_ids(struct push_context *ctx,
                            struct nvc0_context *nvc0,
                            const struct pipe_draw_info *info)

{
   struct nouveau_pushbuf *push = ctx->push;
   struct nouveau_bo *bo;
   uint64_t va;
   uint32_t *data;
   uint32_t format;
   unsigned index_size = nvc0->idxbuf.index_size;
   unsigned i;
   unsigned a = nvc0->vertex->num_elements;

   /* non-indexed draws and biased indices are written as full 32-bit ids */
   if (!index_size || info->index_bias)
      index_size = 4;
   data = (uint32_t *)nouveau_scratch_get(&nvc0->base,
                                          info->count * index_size, &va, &bo);

   BCTX_REFN_bo(nvc0->bufctx_3d, VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   if (info->indexed) {
      if (!info->index_bias) {
         /* no bias: the index buffer can be copied verbatim */
         memcpy(data, ctx->idxbuf, info->count * index_size);
      } else {
         /* bias requires widening each index to 32 bits while adding it */
         switch (nvc0->idxbuf.index_size) {
         case 1:
            copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         case 2:
            copy_indices_u16(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         default:
            copy_indices_u32(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         }
      }
   } else {
      /* sequential draw: ids are just start + bias + i */
      for (i = 0; i < info->count; ++i)
         data[i] = i + (info->start + info->index_bias);
   }

   format = (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT) |
      NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT;

   switch (index_size) {
   case 1:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8;
      break;
   case 2:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16;
      break;
   default:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32;
      break;
   }

   PUSH_SPACE(push, 12);

   /* vertex array 1 must fetch per-vertex, not per-instance */
   if (unlikely(nvc0->state.instance_elts & 2)) {
      nvc0->state.instance_elts &= ~2;
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
   }

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
   PUSH_DATA (push, format);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
   PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | index_size);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   PUSH_DATAh(push, va + info->count * index_size - 1);
   PUSH_DATA (push, va + info->count * index_size - 1);

/* source the vertex id from the attribute slot programmed above */
#define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
   (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_REPLACE), 1);
   PUSH_DATA (push, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) | 1);
}