/* src/gallium/drivers/nouveau/nvc0/nvc0_vbo_translate.c */

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "translate/translate.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"

#include "nvc0/nvc0_3d.xml.h"

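/* State for the fallback draw path: vertex data is converted with the
 * translate object into a scratch buffer and pushed to the GPU, instead of
 * letting the hardware fetch the application's vertex buffers directly.
 */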
struct push_context {
   struct nouveau_pushbuf *push;

   struct translate *translate;
   void *dest;
   const void *idxbuf;

   uint32_t vertex_size;
   uint32_t restart_index;
   uint32_t start_instance;
   uint32_t instance_id;

   bool prim_restart;
   bool need_vertex_id;

   struct {
      bool enabled;
      bool value;
      uint8_t width;
      unsigned stride;
      const uint8_t *data;
   } edgeflag;
};

static void nvc0_push_upload_vertex_ids(struct push_context *,
                                        struct nvc0_context *,
                                        const struct pipe_draw_info *);

static void
nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx)
{
   ctx->push = nvc0->base.pushbuf;

   ctx->translate = nvc0->vertex->translate;
   ctx->vertex_size = nvc0->vertex->size;
   ctx->instance_id = 0;

   ctx->need_vertex_id =
      nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);

   ctx->edgeflag.value = true;
   ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;

   /* silence warnings */
   ctx->edgeflag.data = NULL;
   ctx->edgeflag.stride = 0;
   ctx->edgeflag.width = 0;
}

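/* Point the translate object at the (CPU-mapped) source vertex buffers.
 * The index bias is applied to the map pointer up front, except for
 * per-instance buffers, which are not indexed by vertex.
 */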
static inline void
nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
{
   struct translate *translate = nvc0->vertex->translate;
   unsigned i;

   for (i = 0; i < nvc0->num_vtxbufs; ++i) {
      const uint8_t *map;
      const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];

      if (likely(vb->is_user_buffer))
         map = (const uint8_t *)vb->buffer.user;
      else
         map = nouveau_resource_map_offset(&nvc0->base,
            nv04_resource(vb->buffer.resource), vb->buffer_offset, NOUVEAU_BO_RD);

      if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
         map += (intptr_t)index_bias * vb->stride;

      translate->set_buffer(translate, i, map, vb->stride, ~0);
   }
}

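/* Obtain a CPU-visible pointer to the index data, mapping the index buffer
 * at the given byte offset unless the indices are already in user memory.
 */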
static inline void
nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0,
                     const struct pipe_draw_info *info,
                     unsigned offset)
{
   if (!info->has_user_indices) {
      struct nv04_resource *buf = nv04_resource(info->index.resource);
      ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
            buf, offset, NOUVEAU_BO_RD);
   } else {
      ctx->idxbuf = info->index.user;
   }
}

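/* Locate the edgeflag attribute in its source vertex buffer so that its
 * per-vertex value can be inspected on the CPU while emitting vertices.
 */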
static inline void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
                       int32_t index_bias)
{
   unsigned attr = nvc0->vertprog->vp.edgeflag;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   struct nv04_resource *buf = nv04_resource(vb->buffer.resource);

   ctx->edgeflag.stride = vb->stride;
   ctx->edgeflag.width = util_format_get_blocksize(ve->src_format);
   if (!vb->is_user_buffer) {
      unsigned offset = vb->buffer_offset + ve->src_offset;
      ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
                           buf, offset, NOUVEAU_BO_RD);
   } else {
      ctx->edgeflag.data = (const uint8_t *)vb->buffer.user + ve->src_offset;
   }

   if (index_bias)
      ctx->edgeflag.data += (intptr_t)index_bias * vb->stride;
}

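/* Scan up to 'push' indices and return how many precede the first
 * occurrence of the primitive restart index ('push' if none is found).
 */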
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

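/* Read the edgeflag value of a single vertex; the attribute is either 1 or
 * 4 bytes wide, and any non-zero value counts as true.
 */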
static inline bool
ef_value_8(const struct push_context *ctx, uint32_t index)
{
   const uint8_t *pf = &ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_value_32(const struct push_context *ctx, uint32_t index)
{
   const uint32_t *pf =
      (const uint32_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_toggle(struct push_context *ctx)
{
   ctx->edgeflag.value = !ctx->edgeflag.value;
   return ctx->edgeflag.value;
}

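/* Return the number of consecutive vertices (taken from an index list, or
 * from a linear range in the _seq variant) whose edgeflag still matches the
 * current value, so that EDGEFLAG state is only changed on transitions.
 */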
static inline unsigned
ef_toggle_search_i08(struct push_context *ctx, const uint8_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i16(struct push_context *ctx, const uint16_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i32(struct push_context *ctx, const uint32_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_seq(struct push_context *ctx, unsigned start, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, start++) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, start++) == ef; ++i);
   return i;
}

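/* Allocate scratch memory for 'count' translated vertices, bind it as
 * vertex array 0 and return the CPU destination pointer for translate.
 */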
static inline void *
nvc0_push_setup_vertex_array(struct nvc0_context *nvc0, const unsigned count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo;
   uint64_t va;
   const unsigned size = count * nvc0->vertex->size;

   void *const dest = nouveau_scratch_get(&nvc0->base, size, &va, &bo);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   PUSH_DATAh(push, va + size - 1);
   PUSH_DATA (push, va + size - 1);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   return dest;
}

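/* Emit 'count' vertices from 8-bit indices: translate them into the scratch
 * vertex array, then issue draws in runs that are split at primitive
 * restart indices and at edgeflag transitions. The i16/i32 variants below
 * are identical except for the index type.
 */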
static void
disp_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint8_t *restrict elts = (uint8_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i08(elts, nR, ctx->restart_index);

      translate->run_elts8(translate, elts, nR,
                           ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i08(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

static void
disp_vertices_i16(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint16_t *restrict elts = (uint16_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i16(elts, nR, ctx->restart_index);

      translate->run_elts16(translate, elts, nR,
                            ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i16(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      translate->run_elts(translate, elts, nR,
                          ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

static void
disp_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   unsigned pos = 0;

   /* XXX: This will read the data corresponding to the primitive restart
    * index, maybe we should avoid that?
    */
   translate->run(translate, start, count,
                  ctx->start_instance, ctx->instance_id, ctx->dest);
   do {
      unsigned nr = count;

      if (unlikely(ctx->edgeflag.enabled))
         nr = ef_toggle_search_seq(ctx, start + pos, nr);

      PUSH_SPACE(push, 4);
      if (likely(nr)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
         PUSH_DATA (push, pos);
         PUSH_DATA (push, nr);
      }
      if (unlikely(nr != count))
         IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

      pos += nr;
      count -= nr;
   } while (count);
}


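/* Map a PIPE_PRIM_* primitive type to the hardware VERTEX_BEGIN_GL value. */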
#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

static inline unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(PATCHES);
   default:
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}

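/* Fallback draw entry point: convert the vertex data with translate and
 * push it through a scratch vertex array, once per instance. Typically
 * taken when the hardware cannot fetch the application's vertex layout
 * directly (counted as draw_calls_fallback in the driver stats below).
 */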
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (nvc0->state.index_bias) {
      /* this is already taken care of by translate */
      IMMED_NVC0(ctx.push, NVC0_3D(VB_ELEMENT_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->primitive_restart) {
      /* NOTE: I hope we won't ever need that last index (~0).
       * If we do, we have to disable primitive restart here always and
       * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
       * We could also deactivate PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
       * and add manual restart to disp_vertices_seq.
       */
      BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->index_size ? 0xffffffff : info->restart_index);
   } else
   if (nvc0->state.prim_restart) {
      IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
   }
   nvc0->state.prim_restart = info->primitive_restart;

   if (info->index_size) {
      nvc0_push_map_idxbuf(&ctx, nvc0, info, info->start * info->index_size);
      index_size = info->index_size;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.start_instance = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);


   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->index_size && !info->has_user_indices)
      nouveau_resource_unmap(nv04_resource(info->index.resource));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer.resource));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}

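/* Widen indices to 32 bits while adding the index bias. */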
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

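/* Upload the raw vertex IDs (biased indices, or start + i for array draws)
 * to a scratch buffer and bind it as an extra vertex attribute, which
 * VERTEX_ID_REPLACE then substitutes for the shader's vertex ID.
 */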
static void
nvc0_push_upload_vertex_ids(struct push_context *ctx,
                            struct nvc0_context *nvc0,
                            const struct pipe_draw_info *info)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct nouveau_bo *bo;
   uint64_t va;
   uint32_t *data;
   uint32_t format;
   unsigned index_size = info->index_size;
   unsigned i;
   unsigned a = nvc0->vertex->num_elements;

   if (!index_size || info->index_bias)
      index_size = 4;
   data = (uint32_t *)nouveau_scratch_get(&nvc0->base,
                                          info->count * index_size, &va, &bo);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   if (info->index_size) {
      if (!info->index_bias) {
         memcpy(data, ctx->idxbuf, info->count * index_size);
      } else {
         switch (info->index_size) {
         case 1:
            copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         case 2:
            copy_indices_u16(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         default:
            copy_indices_u32(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         }
      }
   } else {
      for (i = 0; i < info->count; ++i)
         data[i] = i + (info->start + info->index_bias);
   }

   format = (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT) |
            NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT;

   switch (index_size) {
   case 1:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8;
      break;
   case 2:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16;
      break;
   default:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32;
      break;
   }

   PUSH_SPACE(push, 12);

   if (unlikely(nvc0->state.instance_elts & 2)) {
      nvc0->state.instance_elts &= ~2;
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
   }

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
   PUSH_DATA (push, format);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
   PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | index_size);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   PUSH_DATAh(push, va + info->count * index_size - 1);
   PUSH_DATA (push, va + info->count * index_size - 1);

#define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
   (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_REPLACE), 1);
   PUSH_DATA (push, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) | 1);
}