nvc0: preliminary tess support
[mesa.git] / src / gallium / drivers / nouveau / nvc0 / nvc0_vbo_translate.c
1
#include <string.h>

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "translate/translate.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"

#include "nvc0/nvc0_3d.xml.h"
12
/* State for the CPU vertex-push fallback draw path: the translate object
 * converts application vertex formats into an interleaved scratch vertex
 * array that is then drawn from.
 */
struct push_context {
   struct nouveau_pushbuf *push;

   struct translate *translate;
   void *dest;          /* write cursor into the scratch vertex array */
   const void *idxbuf;  /* mapped index buffer, or user-memory pointer */

   uint32_t vertex_size;   /* size in bytes of one translated vertex */
   uint32_t restart_index;
   uint32_t instance_id;

   bool prim_restart;
   bool need_vertex_id;

   struct {
      bool enabled;         /* vertex program reads an edgeflag attribute */
      bool value;           /* edgeflag state last programmed to the HW */
      unsigned stride;
      const uint8_t *data;  /* CPU mapping of the edgeflag attribute data */
   } edgeflag;
};
34
35 static void nvc0_push_upload_vertex_ids(struct push_context *,
36 struct nvc0_context *,
37 const struct pipe_draw_info *);
38
/* Initialize the push_context from the currently bound nvc0 state
 * (pushbuf, translate object, vertex stride) and record whether this
 * draw needs vertex-id upload or edgeflag handling.
 */
static void
nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx)
{
   ctx->push = nvc0->base.pushbuf;

   ctx->translate = nvc0->vertex->translate;
   ctx->vertex_size = nvc0->vertex->size;

   /* Vertex ids are uploaded via the attribute slot after the last user
    * element (see nvc0_push_upload_vertex_ids), so with all 32 slots in
    * use there is no room and the upload is skipped.
    */
   ctx->need_vertex_id =
      nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);

   ctx->edgeflag.value = true;
   ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;

   /* silence warnings */
   ctx->edgeflag.data = NULL;
   ctx->edgeflag.stride = 0;
}
57
/* Hand the translate object a CPU pointer for each bound vertex buffer.
 * index_bias is folded into the buffer pointers here, except for
 * per-instance buffers, which are not index-biased.
 */
static inline void
nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
{
   struct translate *translate = nvc0->vertex->translate;
   unsigned i;

   for (i = 0; i < nvc0->num_vtxbufs; ++i) {
      const uint8_t *map;
      const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];

      /* User memory is the common case on this fallback path. */
      if (likely(!vb->buffer))
         map = (const uint8_t *)vb->user_buffer;
      else
         map = nouveau_resource_map_offset(&nvc0->base,
            nv04_resource(vb->buffer), vb->buffer_offset, NOUVEAU_BO_RD);

      if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
         map += (intptr_t)index_bias * vb->stride;

      translate->set_buffer(translate, i, map, vb->stride, ~0);
   }
}
80
/* Map the bound index buffer for CPU access, or take the user-memory
 * pointer directly when no GPU buffer is bound.
 */
static inline void
nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0)
{
   if (nvc0->idxbuf.buffer) {
      struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
      ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
         buf, nvc0->idxbuf.offset, NOUVEAU_BO_RD);
   } else {
      ctx->idxbuf = nvc0->idxbuf.user_buffer;
   }
}
92
/* Map the vertex buffer that holds the edgeflag attribute so ef_value()
 * can inspect it on the CPU; index_bias is applied to the data pointer
 * up front.
 */
static inline void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
                       int32_t index_bias)
{
   unsigned attr = nvc0->vertprog->vp.edgeflag;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   struct nv04_resource *buf = nv04_resource(vb->buffer);

   ctx->edgeflag.stride = vb->stride;
   if (buf) {
      unsigned offset = vb->buffer_offset + ve->src_offset;
      ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
                           buf, offset, NOUVEAU_BO_RD);
   } else {
      ctx->edgeflag.data = (const uint8_t *)vb->user_buffer + ve->src_offset;
   }

   if (index_bias)
      ctx->edgeflag.data += (intptr_t)index_bias * vb->stride;
}
114
/* Return the number of leading 8-bit indices that are not the
 * primitive-restart index (== push if none is found).
 */
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   unsigned n = 0;
   while (n < push && elts[n] != index)
      ++n;
   return n;
}
122
/* Return the number of leading 16-bit indices that are not the
 * primitive-restart index (== push if none is found).
 */
static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned n = 0;
   while (n < push && elts[n] != index)
      ++n;
   return n;
}
130
/* Return the number of leading 32-bit indices that are not the
 * primitive-restart index (== push if none is found).
 */
static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned n = 0;
   while (n < push && elts[n] != index)
      ++n;
   return n;
}
138
139 static inline bool
140 ef_value(const struct push_context *ctx, uint32_t index)
141 {
142 float *pf = (float *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
143 return *pf ? true : false;
144 }
145
146 static inline bool
147 ef_toggle(struct push_context *ctx)
148 {
149 ctx->edgeflag.value = !ctx->edgeflag.value;
150 return ctx->edgeflag.value;
151 }
152
153 static inline unsigned
154 ef_toggle_search_i08(struct push_context *ctx, const uint8_t *elts, unsigned n)
155 {
156 unsigned i;
157 for (i = 0; i < n && ef_value(ctx, elts[i]) == ctx->edgeflag.value; ++i);
158 return i;
159 }
160
161 static inline unsigned
162 ef_toggle_search_i16(struct push_context *ctx, const uint16_t *elts, unsigned n)
163 {
164 unsigned i;
165 for (i = 0; i < n && ef_value(ctx, elts[i]) == ctx->edgeflag.value; ++i);
166 return i;
167 }
168
169 static inline unsigned
170 ef_toggle_search_i32(struct push_context *ctx, const uint32_t *elts, unsigned n)
171 {
172 unsigned i;
173 for (i = 0; i < n && ef_value(ctx, elts[i]) == ctx->edgeflag.value; ++i);
174 return i;
175 }
176
177 static inline unsigned
178 ef_toggle_search_seq(struct push_context *ctx, unsigned start, unsigned n)
179 {
180 unsigned i;
181 for (i = 0; i < n && ef_value(ctx, start++) == ctx->edgeflag.value; ++i);
182 return i;
183 }
184
/* Allocate scratch GPU memory for 'count' translated vertices and point
 * vertex array 0 at it.  Returns the CPU mapping to write vertices to,
 * or NULL if scratch allocation failed (caller checks).
 */
static inline void *
nvc0_push_setup_vertex_array(struct nvc0_context *nvc0, const unsigned count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo;
   uint64_t va;
   const unsigned size = count * nvc0->vertex->size;

   void *const dest = nouveau_scratch_get(&nvc0->base, size, &va, &bo);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   PUSH_DATAh(push, va + size - 1);
   PUSH_DATA (push, va + size - 1);

   BCTX_REFN_bo(nvc0->bufctx_3d, VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   return dest;
}
208
/* Translate and emit vertices addressed by 8-bit indices, splitting the
 * draw at primitive-restart indices and at edgeflag transitions.
 */
static void
disp_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint8_t *restrict elts = (uint8_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      /* Cut the run at the first restart index, if restart is active. */
      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i08(elts, nR, ctx->restart_index);

      /* Convert nR vertices into the scratch vertex array. */
      translate->run_elts8(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         /* Only emit up to the next edgeflag state change. */
         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i08(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* Single vertex: cheaper to push it as an immediate index. */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         /* Run was cut short by an edgeflag change: flip the HW state. */
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         /* Skip the restart index itself and emit an explicit restart. */
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
264
/* Translate and emit vertices addressed by 16-bit indices, splitting the
 * draw at primitive-restart indices and at edgeflag transitions.
 */
static void
disp_vertices_i16(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint16_t *restrict elts = (uint16_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      /* Cut the run at the first restart index, if restart is active. */
      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i16(elts, nR, ctx->restart_index);

      /* Convert nR vertices into the scratch vertex array. */
      translate->run_elts16(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         /* Only emit up to the next edgeflag state change. */
         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i16(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* Single vertex: cheaper to push it as an immediate index. */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         /* Run was cut short by an edgeflag change: flip the HW state. */
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         /* Skip the restart index itself and emit an explicit restart. */
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
320
/* Translate and emit vertices addressed by 32-bit indices, splitting the
 * draw at primitive-restart indices and at edgeflag transitions.
 */
static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      /* Cut the run at the first restart index, if restart is active. */
      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      /* Convert nR vertices into the scratch vertex array. */
      translate->run_elts(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         /* Only emit up to the next edgeflag state change. */
         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* Single vertex: cheaper to push it as an immediate index. */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         /* Run was cut short by an edgeflag change: flip the HW state. */
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         /* Skip the restart index itself and emit an explicit restart. */
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
376
/* Translate and emit a sequential (non-indexed) vertex range, splitting
 * the draw only at edgeflag transitions.
 */
static void
disp_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   unsigned pos = 0;

   /* XXX: This will read the data corresponding to the primitive restart index,
    * maybe we should avoid that ?
    */
   translate->run(translate, start, count, 0, ctx->instance_id, ctx->dest);
   do {
      unsigned nr = count;

      /* Only emit up to the next edgeflag state change. */
      if (unlikely(ctx->edgeflag.enabled))
         nr = ef_toggle_search_seq(ctx, start + pos, nr);

      PUSH_SPACE(push, 4);
      if (likely(nr)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
         PUSH_DATA (push, pos);
         PUSH_DATA (push, nr);
      }
      /* Run was cut short by an edgeflag change: flip the HW state. */
      if (unlikely(nr != count))
         IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

      pos += nr;
      count -= nr;
   } while (count);
}
407
408
#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

/* Map a PIPE_PRIM_* enum to the hardware VERTEX_BEGIN_GL primitive code;
 * unknown values fall back to POINTS.
 */
static inline unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(PATCHES);
   default:
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}
435
/* Fallback draw entry point: run the CPU translate path to build an
 * interleaved vertex array in scratch memory and emit the draw through
 * the command stream.  Handles index bias, primitive restart, edgeflags,
 * vertex-id upload and instancing.
 */
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (nvc0->state.index_bias) {
      /* this is already taken care of by translate */
      IMMED_NVC0(ctx.push, NVC0_3D(VB_ELEMENT_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->primitive_restart) {
      /* NOTE: I hope we won't ever need that last index (~0).
       * If we do, we have to disable primitive restart here always and
       * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
       * We could also deactivate PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
       * and add manual restart to disp_vertices_seq.
       */
      BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->indexed ? 0xffffffff : info->restart_index);
   } else
   if (nvc0->state.prim_restart) {
      IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
   }
   nvc0->state.prim_restart = info->primitive_restart;

   if (info->indexed) {
      nvc0_push_map_idxbuf(&ctx, nvc0);
      index_size = nvc0->idxbuf.index_size;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         /* The vertex count comes from a stream-output query result. */
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.instance_id = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   /* One pass per instance: vertices are re-translated each iteration
    * because the scratch destination is re-allocated every time.
    */
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);


   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      /* Restore the vertex-id attribute slot to a constant format. */
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->indexed)
      nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}
561
/* Widen n 8-bit indices to 32 bits, adding bias to each. */
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   while (n--)
      *dst++ = *elts++ + bias;
}
569
/* Widen n 16-bit indices to 32 bits, adding bias to each. */
static inline void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   while (n--)
      *dst++ = *elts++ + bias;
}
577
/* Copy n 32-bit indices, adding bias to each. */
static inline void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   while (n--)
      *dst++ = *elts++ + bias;
}
585
/* Upload an array of vertex ids (the original, possibly biased indices)
 * into scratch memory and bind it as an extra vertex attribute, so the
 * shader's vertex id stays correct even though the translate path draws
 * from a repacked sequential vertex array.
 */
static void
nvc0_push_upload_vertex_ids(struct push_context *ctx,
                            struct nvc0_context *nvc0,
                            const struct pipe_draw_info *info)

{
   struct nouveau_pushbuf *push = ctx->push;
   struct nouveau_bo *bo;
   uint64_t va;
   uint32_t *data;
   uint32_t format;
   unsigned index_size = nvc0->idxbuf.index_size;
   unsigned i;
   unsigned a = nvc0->vertex->num_elements; /* attribute slot for the ids */

   /* Non-indexed or biased ids must be materialized as 32-bit values. */
   if (!index_size || info->index_bias)
      index_size = 4;
   data = (uint32_t *)nouveau_scratch_get(&nvc0->base,
                                          info->count * index_size, &va, &bo);

   BCTX_REFN_bo(nvc0->bufctx_3d, VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   if (info->indexed) {
      if (!info->index_bias) {
         /* Unbiased indices can be used as vertex ids verbatim. */
         memcpy(data, ctx->idxbuf, info->count * index_size);
      } else {
         /* Widen to u32 while applying the bias. */
         switch (nvc0->idxbuf.index_size) {
         case 1:
            copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         case 2:
            copy_indices_u16(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         default:
            copy_indices_u32(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         }
      }
   } else {
      /* Sequential draw: ids are start + bias, start + bias + 1, ... */
      for (i = 0; i < info->count; ++i)
         data[i] = i + (info->start + info->index_bias);
   }

   format = (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT) |
      NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT;

   switch (index_size) {
   case 1:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8;
      break;
   case 2:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16;
      break;
   default:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32;
      break;
   }

   PUSH_SPACE(push, 12);

   if (unlikely(nvc0->state.instance_elts & 2)) {
      /* Array 1 is reused below; make sure it is not per-instance. */
      nvc0->state.instance_elts &= ~2;
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
   }

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
   PUSH_DATA (push, format);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
   PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | index_size);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   PUSH_DATAh(push, va + info->count * index_size - 1);
   PUSH_DATA (push, va + info->count * index_size - 1);

#define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
   (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_REPLACE), 1);
   PUSH_DATA (push, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) | 1);
}