/*
 * nvc0: rename 3d binding points to NVC0_BIND_3D_XXX
 *
 * src/gallium/drivers/nouveau/nvc0/nvc0_vbo_translate.c
 */
1
2 #include "pipe/p_context.h"
3 #include "pipe/p_state.h"
4 #include "util/u_inlines.h"
5 #include "util/u_format.h"
6 #include "translate/translate.h"
7
8 #include "nvc0/nvc0_context.h"
9 #include "nvc0/nvc0_resource.h"
10
11 #include "nvc0/nvc0_3d.xml.h"
12
/* Transient state for the CPU vertex-push ("translate") draw path: vertex
 * data is converted on the CPU into a scratch vertex array and the draw is
 * emitted inline through the push buffer.
 */
struct push_context {
   struct nouveau_pushbuf *push;

   struct translate *translate; /* vertex format conversion engine */
   void *dest;                  /* CPU write pointer into the scratch array */
   const void *idxbuf;          /* CPU-visible index buffer (mapped or user) */

   uint32_t vertex_size;   /* size in bytes of one translated vertex */
   uint32_t restart_index; /* primitive restart index from the draw info */
   uint32_t instance_id;   /* instance currently being emitted */

   bool prim_restart;
   bool need_vertex_id;    /* inject a synthetic gl_VertexID attribute */

   /* CPU-side view of the edge-flag attribute; draws are split wherever the
    * flag changes and the EDGEFLAG method is toggled between the pieces.
    */
   struct {
      bool enabled;
      bool value;          /* last value programmed via EDGEFLAG */
      uint8_t width;       /* source format block size (1 or 4 bytes) */
      unsigned stride;
      const uint8_t *data;
   } edgeflag;
};
35
/* Defined at the bottom of this file; uploads a synthetic vertex-id stream. */
static void nvc0_push_upload_vertex_ids(struct push_context *,
                                        struct nvc0_context *,
                                        const struct pipe_draw_info *);
39
/* Initialize the push context from current nvc0 state.
 * Buffer mappings (index buffer, edge flags) are set up separately by
 * nvc0_push_map_idxbuf / nvc0_push_map_edgeflag.
 */
static void
nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx)
{
   ctx->push = nvc0->base.pushbuf;

   ctx->translate = nvc0->vertex->translate;
   ctx->vertex_size = nvc0->vertex->size;

   /* the vertex-id stream is injected at attrib slot num_elements (see
    * nvc0_push_upload_vertex_ids), so a free slot below 32 is required */
   ctx->need_vertex_id =
      nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);

   ctx->edgeflag.value = true;
   ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;

   /* silence warnings */
   ctx->edgeflag.data = NULL;
   ctx->edgeflag.stride = 0;
   ctx->edgeflag.width = 0;
}
59
/* Point the translate engine at CPU-visible views of all vertex buffers.
 * index_bias is folded into the buffer base pointers here, which is why
 * nvc0_push_vbo resets the hardware VB_ELEMENT_BASE to 0 on this path.
 */
static inline void
nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
{
   struct translate *translate = nvc0->vertex->translate;
   unsigned i;

   for (i = 0; i < nvc0->num_vtxbufs; ++i) {
      const uint8_t *map;
      const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];

      if (likely(!vb->buffer))
         map = (const uint8_t *)vb->user_buffer;
      else
         map = nouveau_resource_map_offset(&nvc0->base,
            nv04_resource(vb->buffer), vb->buffer_offset, NOUVEAU_BO_RD);

      /* per-instance buffers are addressed by instance id, not vertex
       * index, so the bias must not be applied to them */
      if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
         map += (intptr_t)index_bias * vb->stride;

      translate->set_buffer(translate, i, map, vb->stride, ~0);
   }
}
82
/* Obtain a CPU pointer to the bound index buffer: map the GPU resource for
 * reading, or use the user pointer directly when no resource is bound.
 */
static inline void
nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0)
{
   if (nvc0->idxbuf.buffer) {
      struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
      ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
         buf, nvc0->idxbuf.offset, NOUVEAU_BO_RD);
   } else {
      ctx->idxbuf = nvc0->idxbuf.user_buffer;
   }
}
94
/* Map the vertex buffer that carries the edge-flag attribute so the CPU can
 * inspect per-vertex flags while splitting draws. index_bias is applied to
 * the data pointer up front, mirroring nvc0_vertex_configure_translate.
 */
static inline void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
                       int32_t index_bias)
{
   unsigned attr = nvc0->vertprog->vp.edgeflag;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   struct nv04_resource *buf = nv04_resource(vb->buffer);

   ctx->edgeflag.stride = vb->stride;
   ctx->edgeflag.width = util_format_get_blocksize(ve->src_format);
   if (buf) {
      unsigned offset = vb->buffer_offset + ve->src_offset;
      ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
         buf, offset, NOUVEAU_BO_RD);
   } else {
      ctx->edgeflag.data = (const uint8_t *)vb->user_buffer + ve->src_offset;
   }

   if (index_bias)
      ctx->edgeflag.data += (intptr_t)index_bias * vb->stride;
}
117
/* Scan the first 'push' 8-bit indices for the primitive-restart marker.
 * Returns the position of the first occurrence, or 'push' if absent.
 */
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   unsigned pos = 0;
   while (pos < push) {
      if (elts[pos] == index)
         break;
      ++pos;
   }
   return pos;
}
125
/* Scan the first 'push' 16-bit indices for the primitive-restart marker.
 * Returns the position of the first occurrence, or 'push' if absent.
 */
static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned pos = 0;
   while (pos < push) {
      if (elts[pos] == index)
         break;
      ++pos;
   }
   return pos;
}
133
/* Scan the first 'push' 32-bit indices for the primitive-restart marker.
 * Returns the position of the first occurrence, or 'push' if absent.
 */
static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned pos = 0;
   while (pos < push) {
      if (elts[pos] == index)
         break;
      ++pos;
   }
   return pos;
}
141
142 static inline bool
143 ef_value_8(const struct push_context *ctx, uint32_t index)
144 {
145 uint8_t *pf = (uint8_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
146 return !!*pf;
147 }
148
149 static inline bool
150 ef_value_32(const struct push_context *ctx, uint32_t index)
151 {
152 uint32_t *pf = (uint32_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
153 return !!*pf;
154 }
155
156 static inline bool
157 ef_toggle(struct push_context *ctx)
158 {
159 ctx->edgeflag.value = !ctx->edgeflag.value;
160 return ctx->edgeflag.value;
161 }
162
/* Count how many of the first n 8-bit indices keep the current edge-flag
 * state; the caller splits the draw at the returned position and toggles
 * the hardware EDGEFLAG state there.
 */
static inline unsigned
ef_toggle_search_i08(struct push_context *ctx, const uint8_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}
174
/* Count how many of the first n 16-bit indices keep the current edge-flag
 * state (see ef_toggle_search_i08).
 */
static inline unsigned
ef_toggle_search_i16(struct push_context *ctx, const uint16_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}
186
/* Count how many of the first n 32-bit indices keep the current edge-flag
 * state (see ef_toggle_search_i08).
 */
static inline unsigned
ef_toggle_search_i32(struct push_context *ctx, const uint32_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}
198
/* Non-indexed variant: count how many consecutive vertices starting at
 * 'start' keep the current edge-flag state.
 */
static inline unsigned
ef_toggle_search_seq(struct push_context *ctx, unsigned start, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, start++) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, start++) == ef; ++i);
   return i;
}
210
/* Reserve scratch memory for 'count' translated vertices and program vertex
 * array 0 to fetch from it. Returns the CPU write pointer; the caller
 * treats NULL as scratch-allocation failure.
 */
static inline void *
nvc0_push_setup_vertex_array(struct nvc0_context *nvc0, const unsigned count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo;
   uint64_t va;
   const unsigned size = count * nvc0->vertex->size;

   void *const dest = nouveau_scratch_get(&nvc0->base, size, &va, &bo);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   /* limit is inclusive, hence the -1 */
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   PUSH_DATAh(push, va + size - 1);
   PUSH_DATA (push, va + size - 1);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   return dest;
}
234
/* Emit an indexed draw from 8-bit indices: translate the vertices into the
 * scratch vertex array, then issue runs of consecutive translated vertices,
 * splitting at primitive-restart indices and at edge-flag changes.
 */
static void
disp_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint8_t *restrict elts = (uint8_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      /* nR = indices up to the next restart marker (or all of them) */
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i08(elts, nR, ctx->restart_index);

      /* convert nR vertices into the scratch array at ctx->dest */
      translate->run_elts8(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         /* nE = vertices sharing the current edge-flag state */
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i08(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* single vertex: the inline-index method is cheaper */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         /* stopped early, so the edge flag flips at this vertex */
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         /* hit a restart index: emit the magic restart element, then skip
          * past it (it still occupies a slot in the scratch array) */
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
290
/* 16-bit index variant of disp_vertices_i08; see that function for the
 * splitting logic (primitive restart and edge-flag runs).
 */
static void
disp_vertices_i16(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint16_t *restrict elts = (uint16_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      /* nR = indices up to the next restart marker (or all of them) */
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i16(elts, nR, ctx->restart_index);

      /* convert nR vertices into the scratch array at ctx->dest */
      translate->run_elts16(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         /* nE = vertices sharing the current edge-flag state */
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i16(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* single vertex: the inline-index method is cheaper */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         /* stopped early, so the edge flag flips at this vertex */
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         /* hit a restart index: emit the magic restart element, then skip
          * past it (it still occupies a slot in the scratch array) */
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
346
/* 32-bit index variant of disp_vertices_i08; see that function for the
 * splitting logic (primitive restart and edge-flag runs).
 */
static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      /* nR = indices up to the next restart marker (or all of them) */
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      /* convert nR vertices into the scratch array at ctx->dest */
      translate->run_elts(translate, elts, nR, 0, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         /* nE = vertices sharing the current edge-flag state */
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            /* single vertex: the inline-index method is cheaper */
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         /* stopped early, so the edge flag flips at this vertex */
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         /* hit a restart index: emit the magic restart element, then skip
          * past it (it still occupies a slot in the scratch array) */
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
402
/* Non-indexed draw: translate 'count' sequential vertices starting at
 * 'start', then emit them in runs split only at edge-flag changes.
 */
static void
disp_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   unsigned pos = 0;

   /* XXX: This will read the data corresponding to the primitive restart index,
    * maybe we should avoid that ?
    */
   translate->run(translate, start, count, 0, ctx->instance_id, ctx->dest);
   do {
      /* nr = vertices sharing the current edge-flag state */
      unsigned nr = count;

      if (unlikely(ctx->edgeflag.enabled))
         nr = ef_toggle_search_seq(ctx, start + pos, nr);

      PUSH_SPACE(push, 4);
      if (likely(nr)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
         PUSH_DATA (push, pos);
         PUSH_DATA (push, nr);
      }
      /* stopped early, so the edge flag flips at this vertex */
      if (unlikely(nr != count))
         IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

      pos += nr;
      count -= nr;
   } while (count);
}
433
434
/* Map a gallium PIPE_PRIM_x enum onto the hardware VERTEX_BEGIN_GL
 * primitive code; unknown values fall back to POINTS.
 */
#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

static inline unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(PATCHES);
   default:
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}
461
/* Fallback draw path: vertices are converted by the CPU into a scratch
 * vertex array and the draw is emitted inline, one instance at a time
 * (counted by the draw_calls_fallback_count stat below).
 */
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (nvc0->state.index_bias) {
      /* this is already taken care of by translate */
      IMMED_NVC0(ctx.push, NVC0_3D(VB_ELEMENT_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->primitive_restart) {
      /* NOTE: I hope we won't ever need that last index (~0).
       * If we do, we have to disable primitive restart here always and
       * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
       * We could also deactive PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
       * and add manual restart to disp_vertices_seq.
       */
      BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      /* indexed draws re-emit restart as the 0xffffffff magic element */
      PUSH_DATA (ctx.push, info->indexed ? 0xffffffff : info->restart_index);
   } else
   if (nvc0->state.prim_restart) {
      IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
   }
   nvc0->state.prim_restart = info->primitive_restart;

   if (info->indexed) {
      nvc0_push_map_idxbuf(&ctx, nvc0);
      index_size = nvc0->idxbuf.index_size;
   } else {
      /* transform-feedback-driven draw: get the vertex count from the
       * stream-output query */
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.instance_id = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   /* one iteration per instance; each instance gets its own scratch copy
    * of the translated vertices */
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);


   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      /* restore the vertex-id attrib/array that
       * nvc0_push_upload_vertex_ids programmed.
       * NOTE(review): this resets slot 1 while the upload programs attrib
       * slot a = num_elements — confirm intended. */
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->indexed)
      nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}
587
/* Widen n 8-bit indices to 32 bits, adding the index bias to each. */
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   while (n--)
      *dst++ = (uint32_t)*elts++ + bias;
}
595
/* Widen n 16-bit indices to 32 bits, adding the index bias to each. */
static inline void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   while (n--)
      *dst++ = (uint32_t)*elts++ + bias;
}
603
/* Copy n 32-bit indices, adding the index bias to each. */
static inline void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   while (n--)
      *dst++ = *elts++ + bias;
}
611
/* Upload a gl_VertexID stream for shaders that need it on this path: build
 * the (bias-adjusted) index list in scratch memory, bind it as an extra
 * vertex attribute at slot a = num_elements fetched through vertex array 1,
 * and redirect VERTEX_ID to that attribute.
 */
static void
nvc0_push_upload_vertex_ids(struct push_context *ctx,
                            struct nvc0_context *nvc0,
                            const struct pipe_draw_info *info)

{
   struct nouveau_pushbuf *push = ctx->push;
   struct nouveau_bo *bo;
   uint64_t va;
   uint32_t *data;
   uint32_t format;
   unsigned index_size = nvc0->idxbuf.index_size;
   unsigned i;
   unsigned a = nvc0->vertex->num_elements;

   /* non-indexed draws and biased indices are always written as u32 */
   if (!index_size || info->index_bias)
      index_size = 4;
   data = (uint32_t *)nouveau_scratch_get(&nvc0->base,
                                          info->count * index_size, &va, &bo);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   if (info->indexed) {
      if (!info->index_bias) {
         /* no bias: reuse the index buffer contents verbatim */
         memcpy(data, ctx->idxbuf, info->count * index_size);
      } else {
         /* widen to u32 while applying the bias */
         switch (nvc0->idxbuf.index_size) {
         case 1:
            copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         case 2:
            copy_indices_u16(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         default:
            copy_indices_u32(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         }
      }
   } else {
      /* sequential draw: ids are start + bias, start + bias + 1, ... */
      for (i = 0; i < info->count; ++i)
         data[i] = i + (info->start + info->index_bias);
   }

   /* fetch the id attribute as unsigned ints from buffer slot 1 */
   format = (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT) |
      NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT;

   switch (index_size) {
   case 1:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8;
      break;
   case 2:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16;
      break;
   default:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32;
      break;
   }

   PUSH_SPACE(push, 12);

   /* the id stream advances per vertex, never per instance */
   if (unlikely(nvc0->state.instance_elts & 2)) {
      nvc0->state.instance_elts &= ~2;
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
   }

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
   PUSH_DATA (push, format);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
   PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | index_size);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   /* limit is inclusive, hence the -1 */
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   PUSH_DATAh(push, va + info->count * index_size - 1);
   PUSH_DATA (push, va + info->count * index_size - 1);

/* input-attribute register offset of attrib slot a, as a VERTEX_ID_REPLACE
 * source selector */
#define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
   (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_REPLACE), 1);
   PUSH_DATA (push, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) | 1);
}