nouveau: use bool instead of boolean
[mesa.git] / src / gallium / drivers / nouveau / nvc0 / nvc0_vbo_translate.c
1
#include <string.h>

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "translate/translate.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"

#include "nvc0/nvc0_3d.xml.h"
12
/* CPU-side state for pushing translated vertex data inline through the
 * command stream (fallback draw path).
 */
struct push_context {
   struct nouveau_pushbuf *push;  /* channel command push buffer */

   struct translate *translate;   /* converts app vertex formats to HW layout */
   void *dest;                    /* write cursor into the scratch vertex buffer */
   const void *idxbuf;            /* mapped index buffer (or user pointer) */

   uint32_t vertex_size;          /* size in bytes of one translated vertex */
   uint32_t restart_index;        /* primitive restart index of this draw */
   uint32_t instance_id;          /* instance currently being emitted */

   bool prim_restart;             /* primitive restart enabled for this draw */
   bool need_vertex_id;           /* vertex program reads the vertex id */

   /* CPU-side edge flag handling: flags are read from the mapped attribute
    * data and changes are sent via the 3D EDGEFLAG method.
    */
   struct {
      bool enabled;               /* vertex program consumes an edge flag */
      bool value;                 /* last edge flag value sent to the GPU */
      unsigned stride;            /* byte stride between per-vertex flags */
      const uint8_t *data;        /* mapped edge flag attribute data */
   } edgeflag;
};
34
35 static void nvc0_push_upload_vertex_ids(struct push_context *,
36 struct nvc0_context *,
37 const struct pipe_draw_info *);
38
39 static void
40 nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx)
41 {
42 ctx->push = nvc0->base.pushbuf;
43
44 ctx->translate = nvc0->vertex->translate;
45 ctx->vertex_size = nvc0->vertex->size;
46
47 ctx->need_vertex_id =
48 nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);
49
50 ctx->edgeflag.value = true;
51 ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;
52
53 /* silence warnings */
54 ctx->edgeflag.data = NULL;
55 ctx->edgeflag.stride = 0;
56 }
57
/* Hand the translate object a CPU pointer for each bound vertex buffer so
 * it can read attribute data when converting vertices.
 */
static INLINE void
nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
{
   struct translate *translate = nvc0->vertex->translate;
   unsigned i;

   for (i = 0; i < nvc0->num_vtxbufs; ++i) {
      const uint8_t *map;
      const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];

      if (likely(!vb->buffer))
         map = (const uint8_t *)vb->user_buffer;
      else
         map = nouveau_resource_map_offset(&nvc0->base,
            nv04_resource(vb->buffer), vb->buffer_offset, NOUVEAU_BO_RD);

      /* Apply the index bias on the CPU side by offsetting the mapping,
       * except for per-instance buffers, which are not indexed by vertex.
       */
      if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
         map += (intptr_t)index_bias * vb->stride;

      translate->set_buffer(translate, i, map, vb->stride, ~0);
   }
}
80
81 static INLINE void
82 nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0)
83 {
84 if (nvc0->idxbuf.buffer) {
85 struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
86 ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
87 buf, nvc0->idxbuf.offset, NOUVEAU_BO_RD);
88 } else {
89 ctx->idxbuf = nvc0->idxbuf.user_buffer;
90 }
91 }
92
/* Map the vertex buffer holding the edge flag attribute so the per-vertex
 * flags can be inspected on the CPU while pushing vertices.
 */
static INLINE void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
                       int32_t index_bias)
{
   unsigned attr = nvc0->vertprog->vp.edgeflag;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   struct nv04_resource *buf = nv04_resource(vb->buffer);

   ctx->edgeflag.stride = vb->stride;
   if (buf) {
      unsigned offset = vb->buffer_offset + ve->src_offset;
      ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
                           buf, offset, NOUVEAU_BO_RD);
   } else {
      /* User-pointer vertex buffer: read the flags directly. */
      ctx->edgeflag.data = (const uint8_t *)vb->user_buffer + ve->src_offset;
   }

   /* Mirror the CPU-side index bias applied to the other vertex buffers. */
   if (index_bias)
      ctx->edgeflag.data += (intptr_t)index_bias * vb->stride;
}
114
/* Return the count of leading 8-bit indices that are not the restart index. */
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   unsigned n = 0;
   while (n < push && elts[n] != index)
      ++n;
   return n;
}
122
/* Return the count of leading 16-bit indices that are not the restart index. */
static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned n = 0;
   while (n < push && elts[n] != index)
      ++n;
   return n;
}
130
/* Return the count of leading 32-bit indices that are not the restart index. */
static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned n = 0;
   while (n < push && elts[n] != index)
      ++n;
   return n;
}
138
139 static INLINE bool
140 ef_value(const struct push_context *ctx, uint32_t index)
141 {
142 float *pf = (float *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
143 return *pf ? true : false;
144 }
145
146 static INLINE bool
147 ef_toggle(struct push_context *ctx)
148 {
149 ctx->edgeflag.value = !ctx->edgeflag.value;
150 return ctx->edgeflag.value;
151 }
152
153 static INLINE unsigned
154 ef_toggle_search_i08(struct push_context *ctx, const uint8_t *elts, unsigned n)
155 {
156 unsigned i;
157 for (i = 0; i < n && ef_value(ctx, elts[i]) == ctx->edgeflag.value; ++i);
158 return i;
159 }
160
161 static INLINE unsigned
162 ef_toggle_search_i16(struct push_context *ctx, const uint16_t *elts, unsigned n)
163 {
164 unsigned i;
165 for (i = 0; i < n && ef_value(ctx, elts[i]) == ctx->edgeflag.value; ++i);
166 return i;
167 }
168
169 static INLINE unsigned
170 ef_toggle_search_i32(struct push_context *ctx, const uint32_t *elts, unsigned n)
171 {
172 unsigned i;
173 for (i = 0; i < n && ef_value(ctx, elts[i]) == ctx->edgeflag.value; ++i);
174 return i;
175 }
176
177 static INLINE unsigned
178 ef_toggle_search_seq(struct push_context *ctx, unsigned start, unsigned n)
179 {
180 unsigned i;
181 for (i = 0; i < n && ef_value(ctx, start++) == ctx->edgeflag.value; ++i);
182 return i;
183 }
184
/* Allocate scratch memory for count translated vertices and bind it as
 * vertex array 0.  Returns the CPU pointer to write vertex data into
 * (the caller checks for NULL on allocation failure).
 */
static INLINE void *
nvc0_push_setup_vertex_array(struct nvc0_context *nvc0, const unsigned count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo;
   uint64_t va;
   const unsigned size = count * nvc0->vertex->size;

   void *const dest = nouveau_scratch_get(&nvc0->base, size, &va, &bo);

   /* Point array 0 at the scratch range [va, va + size - 1]. */
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   PUSH_DATAh(push, va + size - 1);
   PUSH_DATA (push, va + size - 1);

   /* Keep the scratch BO referenced for the duration of the draw. */
   BCTX_REFN_bo(nvc0->bufctx_3d, VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   return dest;
}
208
/* Translate and dispatch count vertices addressed by 8-bit indices,
 * splitting the draw at primitive restart indices and at edge flag
 * transitions.
 */
static void
disp_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint8_t *restrict elts = (uint8_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      /* nR: run of indices up to (not including) the next restart index. */
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i08(elts, nR, ctx->restart_index);

      translate->run_elts8(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         /* nE: sub-run with a constant edge flag value. */
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i08(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* Single vertex: push its position as an index instead. */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         /* Edge flag changed inside this run: toggle it for what follows. */
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      /* Indices remain, so a restart index was hit: emit the restart
       * marker and skip over the restart index itself.
       */
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
264
/* Translate and dispatch count vertices addressed by 16-bit indices,
 * splitting the draw at primitive restart indices and at edge flag
 * transitions.
 */
static void
disp_vertices_i16(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint16_t *restrict elts = (uint16_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      /* nR: run of indices up to (not including) the next restart index. */
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i16(elts, nR, ctx->restart_index);

      translate->run_elts16(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         /* nE: sub-run with a constant edge flag value. */
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i16(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* Single vertex: push its position as an index instead. */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         /* Edge flag changed inside this run: toggle it for what follows. */
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      /* Indices remain, so a restart index was hit: emit the restart
       * marker and skip over the restart index itself.
       */
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
320
/* Translate and dispatch count vertices addressed by 32-bit indices,
 * splitting the draw at primitive restart indices and at edge flag
 * transitions.
 */
static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      /* nR: run of indices up to (not including) the next restart index. */
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      translate->run_elts(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         /* nE: sub-run with a constant edge flag value. */
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* Single vertex: push its position as an index instead. */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         /* Edge flag changed inside this run: toggle it for what follows. */
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      /* Indices remain, so a restart index was hit: emit the restart
       * marker and skip over the restart index itself.
       */
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
376
/* Translate and dispatch count sequential (non-indexed) vertices, splitting
 * the draw only at edge flag transitions.
 */
static void
disp_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   unsigned pos = 0;

   /* XXX: This will read the data corresponding to the primitive restart index,
    * maybe we should avoid that ?
    */
   translate->run(translate, start, count, 0, ctx->instance_id, ctx->dest);
   do {
      /* nr: run of vertices with a constant edge flag value. */
      unsigned nr = count;

      if (unlikely(ctx->edgeflag.enabled))
         nr = ef_toggle_search_seq(ctx, start + pos, nr);

      PUSH_SPACE(push, 4);
      if (likely(nr)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
         PUSH_DATA (push, pos);
         PUSH_DATA (push, nr);
      }
      /* Edge flag changed inside this run: toggle it for what follows. */
      if (unlikely(nr != count))
         IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

      pos += nr;
      count -= nr;
   } while (count);
}
407
408
/* Expand PIPE_PRIM_x -> NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_x mappings. */
#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

/* Translate a gallium primitive type to the hardware VERTEX_BEGIN_GL value. */
static INLINE unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   /*
   NVC0_PRIM_GL_CASE(PATCHES); */
   default:
      /* Unknown primitive type: fall back to points rather than emit an
       * invalid hardware value.
       */
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}
436
/* Fallback draw path: translate vertices on the CPU into scratch memory and
 * emit the draw through the command stream, handling primitive restart,
 * edge flags, instancing and vertex id on the CPU side.
 */
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   /* Point translate at the (possibly index-biased) vertex buffers. */
   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (nvc0->state.index_bias) {
      /* this is already taken care of by translate */
      IMMED_NVC0(ctx.push, NVC0_3D(VB_ELEMENT_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->primitive_restart) {
      /* NOTE: I hope we won't ever need that last index (~0).
       * If we do, we have to disable primitive restart here always and
       * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
       * We could also deactive PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
       * and add manual restart to disp_vertices_seq.
       */
      BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      /* For indexed draws the disp_vertices_* functions rewrite restarts as
       * 0xffffffff pushed via VB_ELEMENT_U32.
       */
      PUSH_DATA (ctx.push, info->indexed ? 0xffffffff : info->restart_index);
   } else
   if (nvc0->state.prim_restart) {
      IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
   }
   nvc0->state.prim_restart = info->primitive_restart;

   if (info->indexed) {
      nvc0_push_map_idxbuf(&ctx, nvc0);
      index_size = nvc0->idxbuf.index_size;
   } else {
      /* Draw-auto: read the vertex count from the stream output query. */
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.instance_id = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   /* Replay the whole draw once per instance. */
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      /* Release this iteration's scratch buffer references. */
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);


   /* reset state and unmap buffers (no-op) */

   /* Leave the edge flag in its default (true) state. */
   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   /* Undo the vertex-id attribute setup done by
    * nvc0_push_upload_vertex_ids.
    */
   if (unlikely(ctx.need_vertex_id)) {
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->indexed)
      nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}
562
/* Widen 8-bit indices to 32 bits, adding the index bias. */
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   while (n--)
      *dst++ = *elts++ + bias;
}
570
/* Widen 16-bit indices to 32 bits, adding the index bias. */
static inline void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   while (n--)
      *dst++ = *elts++ + bias;
}
578
/* Copy 32-bit indices, adding the index bias (wraps modulo 2^32). */
static inline void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   while (n--)
      *dst++ = *elts++ + bias;
}
586
/* Upload a scratch buffer of vertex ids and feed it to the vertex shader
 * through an extra vertex attribute (slot a, one past the user attributes)
 * fetched from vertex array 1, with VERTEX_ID_REPLACE pointing at it.
 */
static void
nvc0_push_upload_vertex_ids(struct push_context *ctx,
                            struct nvc0_context *nvc0,
                            const struct pipe_draw_info *info)

{
   struct nouveau_pushbuf *push = ctx->push;
   struct nouveau_bo *bo;
   uint64_t va;
   uint32_t *data;
   uint32_t format;
   unsigned index_size = nvc0->idxbuf.index_size;
   unsigned i;
   unsigned a = nvc0->vertex->num_elements;

   /* Non-indexed or biased draws generate fresh 32-bit id values. */
   if (!index_size || info->index_bias)
      index_size = 4;
   data = (uint32_t *)nouveau_scratch_get(&nvc0->base,
                                          info->count * index_size, &va, &bo);

   BCTX_REFN_bo(nvc0->bufctx_3d, VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   if (info->indexed) {
      if (!info->index_bias) {
         /* Unbiased: the ids are just the indices, copied verbatim. */
         memcpy(data, ctx->idxbuf, info->count * index_size);
      } else {
         /* Biased: widen to 32 bits while adding the bias. */
         switch (nvc0->idxbuf.index_size) {
         case 1:
            copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         case 2:
            copy_indices_u16(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         default:
            copy_indices_u32(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         }
      }
   } else {
      /* Sequential draw: ids count up from start (+ bias). */
      for (i = 0; i < info->count; ++i)
         data[i] = i + (info->start + info->index_bias);
   }

   /* Attribute format: unsigned integers fetched from vertex array 1. */
   format = (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT) |
      NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT;

   switch (index_size) {
   case 1:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8;
      break;
   case 2:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16;
      break;
   default:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32;
      break;
   }

   PUSH_SPACE(push, 12);

   /* Array 1 must fetch per-vertex, not per-instance. */
   if (unlikely(nvc0->state.instance_elts & 2)) {
      nvc0->state.instance_elts &= ~2;
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
   }

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
   PUSH_DATA (push, format);

   /* Bind the id buffer as vertex array 1 with stride == index_size. */
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
   PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | index_size);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   PUSH_DATAh(push, va + info->count * index_size - 1);
   PUSH_DATA (push, va + info->count * index_size - 1);

/* Input offset (in words) of attribute a in the vertex shader input space. */
#define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
   (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_REPLACE), 1);
   PUSH_DATA (push, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) | 1);
}