/* mesa: src/gallium/drivers/nouveau/nvc0/nvc0_vbo_translate.c */

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "translate/translate.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"

#include "nvc0/nvc0_3d.xml.h"

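/* State for the CPU vertex-push fallback: a translate object converts the
 * source vertex data into a scratch vertex buffer, while the fields below
 * track index buffer access, primitive restart and edgeflag handling.
 */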
struct push_context {
   struct nouveau_pushbuf *push;

   struct translate *translate;
   void *dest;
   const void *idxbuf;

   uint32_t vertex_size;
   uint32_t restart_index;
   uint32_t start_instance;
   uint32_t instance_id;

   bool prim_restart;
   bool need_vertex_id;

   struct {
      bool enabled;
      bool value;
      uint8_t width;
      unsigned stride;
      const uint8_t *data;
   } edgeflag;
};

static void nvc0_push_upload_vertex_ids(struct push_context *,
                                        struct nvc0_context *,
                                        const struct pipe_draw_info *);

static void
nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx)
{
   ctx->push = nvc0->base.pushbuf;

   ctx->translate = nvc0->vertex->translate;
   ctx->vertex_size = nvc0->vertex->size;
   ctx->instance_id = 0;

   ctx->need_vertex_id =
      nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);

   ctx->edgeflag.value = true;
   ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;

   /* silence warnings */
   ctx->edgeflag.data = NULL;
   ctx->edgeflag.stride = 0;
   ctx->edgeflag.width = 0;
}

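/* Point the translate object at each (mapped) source vertex buffer.
 * index_bias is applied to the buffer pointers here, except for
 * per-instance buffers, which index_bias does not affect.
 */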
static inline void
nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
{
   struct translate *translate = nvc0->vertex->translate;
   unsigned i;

   for (i = 0; i < nvc0->num_vtxbufs; ++i) {
      const uint8_t *map;
      const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];

      if (likely(vb->is_user_buffer))
         map = (const uint8_t *)vb->buffer.user;
      else
         map = nouveau_resource_map_offset(&nvc0->base,
               nv04_resource(vb->buffer.resource), vb->buffer_offset, NOUVEAU_BO_RD);

      if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
         map += (intptr_t)index_bias * vb->stride;

      translate->set_buffer(translate, i, map, vb->stride, ~0);
   }
}

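/* Get a CPU-readable pointer to the index buffer. */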
static inline void
nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0,
                     const struct pipe_draw_info *info)
{
   if (!info->has_user_indices) {
      struct nv04_resource *buf = nv04_resource(info->index.resource);
      ctx->idxbuf = nouveau_resource_map_offset(
            &nvc0->base, buf, 0, NOUVEAU_BO_RD);
   } else {
      ctx->idxbuf = info->index.user;
   }
}

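/* Locate and map the vertex attribute that carries the edgeflag. */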
static inline void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
                       int32_t index_bias)
{
   unsigned attr = nvc0->vertprog->vp.edgeflag;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   struct nv04_resource *buf = nv04_resource(vb->buffer.resource);

   ctx->edgeflag.stride = vb->stride;
   ctx->edgeflag.width = util_format_get_blocksize(ve->src_format);
   if (!vb->is_user_buffer) {
      unsigned offset = vb->buffer_offset + ve->src_offset;
      ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
                                                       buf, offset, NOUVEAU_BO_RD);
   } else {
      ctx->edgeflag.data = (const uint8_t *)vb->buffer.user + ve->src_offset;
   }

   if (index_bias)
      ctx->edgeflag.data += (intptr_t)index_bias * vb->stride;
}

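/* Return how many of the first `push` elements precede the primitive
 * restart index (all of them if it does not occur).
 */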
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

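/* Read the edgeflag of a single vertex, as an 8- or 32-bit value
 * depending on the attribute's format width.
 */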
static inline bool
ef_value_8(const struct push_context *ctx, uint32_t index)
{
   uint8_t *pf = (uint8_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_value_32(const struct push_context *ctx, uint32_t index)
{
   uint32_t *pf = (uint32_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_toggle(struct push_context *ctx)
{
   ctx->edgeflag.value = !ctx->edgeflag.value;
   return ctx->edgeflag.value;
}

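/* Return the length of the longest prefix of elts (or of the sequential
 * range, for _seq) over which the edgeflag keeps its current value, i.e.
 * the position where EDGEFLAG has to be toggled.
 */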
static inline unsigned
ef_toggle_search_i08(struct push_context *ctx, const uint8_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i16(struct push_context *ctx, const uint16_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i32(struct push_context *ctx, const uint32_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_seq(struct push_context *ctx, unsigned start, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, start++) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, start++) == ef; ++i);
   return i;
}

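/* Allocate scratch space for the translated vertices and bind it as
 * vertex array 0.
 */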
static inline void *
nvc0_push_setup_vertex_array(struct nvc0_context *nvc0, const unsigned count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo;
   uint64_t va;
   const unsigned size = count * nvc0->vertex->size;

   void *const dest = nouveau_scratch_get(&nvc0->base, size, &va, &bo);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   PUSH_DATAh(push, va + size - 1);
   PUSH_DATA (push, va + size - 1);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   return dest;
}

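/* Translate and draw 8-bit indexed vertices. Each run is cut at the
 * primitive restart index (re-emitted as VB_ELEMENT_U32 = ~0) and split
 * further wherever the edgeflag changes; single vertices are pushed via
 * VB_ELEMENT_U32. The _i16/_i32 variants below are identical except for
 * the index type.
 */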
static void
disp_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint8_t *restrict elts = (uint8_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i08(elts, nR, ctx->restart_index);

      translate->run_elts8(translate, elts, nR,
                           ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i08(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

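/* As disp_vertices_i08, for 16-bit indices. */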
static void
disp_vertices_i16(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint16_t *restrict elts = (uint16_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i16(elts, nR, ctx->restart_index);

      translate->run_elts16(translate, elts, nR,
                            ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i16(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

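/* As disp_vertices_i08, for 32-bit indices. */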
static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      translate->run_elts(translate, elts, nR,
                          ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}

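/* Non-indexed variant: the whole range is translated at once, and the
 * draw is only split where the edgeflag changes. Primitive restart is
 * left to the hardware here (PRIM_RESTART_WITH_DRAW_ARRAYS).
 */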
static void
disp_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   unsigned pos = 0;

   /* XXX: This will read the data corresponding to the primitive restart
    * index, maybe we should avoid that?
    */
   translate->run(translate, start, count,
                  ctx->start_instance, ctx->instance_id, ctx->dest);
   do {
      unsigned nr = count;

      if (unlikely(ctx->edgeflag.enabled))
         nr = ef_toggle_search_seq(ctx, start + pos, nr);

      PUSH_SPACE(push, 4);
      if (likely(nr)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
         PUSH_DATA (push, pos);
         PUSH_DATA (push, nr);
      }
      if (unlikely(nr != count))
         IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

      pos += nr;
      count -= nr;
   } while (count);
}


#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

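/* Map a gallium primitive type to the hardware VERTEX_BEGIN_GL enum. */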
static inline unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(PATCHES);
   default:
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}

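/* Entry point of the push fallback: vertices are converted on the CPU via
 * the translate module and drawn from scratch memory, looping once per
 * instance.
 */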
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (nvc0->state.index_bias) {
      /* this is already taken care of by translate */
      IMMED_NVC0(ctx.push, NVC0_3D(VB_ELEMENT_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->primitive_restart) {
      /* NOTE: I hope we won't ever need that last index (~0).
       * If we do, we have to disable primitive restart here always and
       * use END,BEGIN to restart. (XXX: would that affect PrimitiveID?)
       * We could also deactivate PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
       * and add manual restart to disp_vertices_seq.
       */
      BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->index_size ? 0xffffffff : info->restart_index);
   } else
   if (nvc0->state.prim_restart) {
      IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
   }
   nvc0->state.prim_restart = info->primitive_restart;

   if (info->index_size) {
      nvc0_push_map_idxbuf(&ctx, nvc0, info);
      index_size = info->index_size;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.start_instance = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);


   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->index_size && !info->has_user_indices)
      nouveau_resource_unmap(nv04_resource(info->index.resource));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer.resource));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}

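/* Widen indices to 32 bit, applying the index bias. */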
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

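/* Upload the real vertex IDs (indices with bias applied, or sequential
 * IDs for non-indexed draws) as an extra vertex attribute, and make
 * VERTEX_ID_REPLACE source the vertex ID from that attribute.
 */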
static void
nvc0_push_upload_vertex_ids(struct push_context *ctx,
                            struct nvc0_context *nvc0,
                            const struct pipe_draw_info *info)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct nouveau_bo *bo;
   uint64_t va;
   uint32_t *data;
   uint32_t format;
   unsigned index_size = info->index_size;
   unsigned i;
   unsigned a = nvc0->vertex->num_elements;

   if (!index_size || info->index_bias)
      index_size = 4;
   data = (uint32_t *)nouveau_scratch_get(&nvc0->base,
                                          info->count * index_size, &va, &bo);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   if (info->index_size) {
      if (!info->index_bias) {
         memcpy(data, ctx->idxbuf, info->count * index_size);
      } else {
         switch (info->index_size) {
         case 1:
            copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         case 2:
            copy_indices_u16(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         default:
            copy_indices_u32(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         }
      }
   } else {
      for (i = 0; i < info->count; ++i)
         data[i] = i + (info->start + info->index_bias);
   }

   format = (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT) |
      NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT;

   switch (index_size) {
   case 1:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8;
      break;
   case 2:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16;
      break;
   default:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32;
      break;
   }

   PUSH_SPACE(push, 12);

   if (unlikely(nvc0->state.instance_elts & 2)) {
      nvc0->state.instance_elts &= ~2;
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
   }

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
   PUSH_DATA (push, format);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
   PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | index_size);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);
   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   PUSH_DATAh(push, va + info->count * index_size - 1);
   PUSH_DATA (push, va + info->count * index_size - 1);

#define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
   (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_REPLACE), 1);
   PUSH_DATA (push, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) | 1);
}