#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "translate/translate.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_resource.h"

#include "nvc0/nvc0_3d.xml.h"
struct push_context {
   struct nouveau_pushbuf *push;
   struct translate *translate;
   void *dest;
   const void *idxbuf;
   uint32_t vertex_size;
   uint32_t restart_index;
   uint32_t start_instance;
   uint32_t instance_id;
   bool prim_restart;
   bool need_vertex_id;
   struct {
      bool enabled;
      bool value;
      uint8_t width;
      unsigned stride;
      const uint8_t *data;
   } edgeflag;
};
static void nvc0_push_upload_vertex_ids(struct push_context *,
                                        struct nvc0_context *,
                                        const struct pipe_draw_info *);
static void
nvc0_push_context_init(struct nvc0_context *nvc0, struct push_context *ctx)
{
   ctx->push = nvc0->base.pushbuf;

   ctx->translate = nvc0->vertex->translate;
   ctx->vertex_size = nvc0->vertex->size;
   ctx->instance_id = 0;

   ctx->need_vertex_id =
      nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);

   ctx->edgeflag.value = true;
   ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;

   /* silence warnings */
   ctx->edgeflag.data = NULL;
   ctx->edgeflag.stride = 0;
   ctx->edgeflag.width = 0;
}
static inline void
nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
{
   struct translate *translate = nvc0->vertex->translate;
   unsigned i;

   for (i = 0; i < nvc0->num_vtxbufs; ++i) {
      const uint8_t *map;
      const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];

      if (likely(vb->is_user_buffer))
         map = (const uint8_t *)vb->buffer.user;
      else {
         if (!vb->buffer.resource)
            continue;

         map = nouveau_resource_map_offset(&nvc0->base,
            nv04_resource(vb->buffer.resource), vb->buffer_offset, NOUVEAU_BO_RD);
      }

      if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
         map += (intptr_t)index_bias * vb->stride;

      translate->set_buffer(translate, i, map, vb->stride, ~0);
   }
}
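/* NOTE (added for clarity, not from the original source): set_buffer() only
 * records the CPU pointer and stride for each vertex buffer slot; the actual
 * conversion to the interleaved hardware layout happens later in the
 * run()/run_elts*() calls, which write into the scratch area set up by
 * nvc0_push_setup_vertex_array(). Passing ~0 as max_index effectively
 * disables index clamping by translate.
 */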
static inline void
nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0,
                     const struct pipe_draw_info *info)
{
   if (!info->has_user_indices) {
      struct nv04_resource *buf = nv04_resource(info->index.resource);
      ctx->idxbuf = nouveau_resource_map_offset(
            &nvc0->base, buf, 0, NOUVEAU_BO_RD);
   } else {
      ctx->idxbuf = info->index.user;
   }
}
static inline void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
                       int32_t index_bias)
{
   unsigned attr = nvc0->vertprog->vp.edgeflag;
   struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
   struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
   struct nv04_resource *buf = nv04_resource(vb->buffer.resource);

   ctx->edgeflag.stride = vb->stride;
   ctx->edgeflag.width = util_format_get_blocksize(ve->src_format);
   if (!vb->is_user_buffer) {
      unsigned offset = vb->buffer_offset + ve->src_offset;
      ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
                           buf, offset, NOUVEAU_BO_RD);
   } else {
      ctx->edgeflag.data = (const uint8_t *)vb->buffer.user + ve->src_offset;
   }

   if (index_bias)
      ctx->edgeflag.data += (intptr_t)index_bias * vb->stride;
}
static inline unsigned
prim_restart_search_i08(const uint8_t *elts, unsigned push, uint8_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i16(const uint16_t *elts, unsigned push, uint16_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}

static inline unsigned
prim_restart_search_i32(const uint32_t *elts, unsigned push, uint32_t index)
{
   unsigned i;
   for (i = 0; i < push && elts[i] != index; ++i);
   return i;
}
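/* These searches implement primitive restart on the CPU: each returns the
 * number of indices before the first occurrence of the restart index, so a
 * caller can emit [0, i) as one contiguous range and deal with the restart
 * separately. E.g. for elts = { 0, 1, 0xff, 2 } and index = 0xff,
 * prim_restart_search_i08() returns 2.
 */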
static inline bool
ef_value_8(const struct push_context *ctx, uint32_t index)
{
   uint8_t *pf = (uint8_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_value_32(const struct push_context *ctx, uint32_t index)
{
   uint32_t *pf = (uint32_t *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
   return !!*pf;
}

static inline bool
ef_toggle(struct push_context *ctx)
{
   ctx->edgeflag.value = !ctx->edgeflag.value;
   return ctx->edgeflag.value;
}

static inline unsigned
ef_toggle_search_i08(struct push_context *ctx, const uint8_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i16(struct push_context *ctx, const uint16_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_i32(struct push_context *ctx, const uint32_t *elts, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, elts[i]) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, elts[i]) == ef; ++i);
   return i;
}

static inline unsigned
ef_toggle_search_seq(struct push_context *ctx, unsigned start, unsigned n)
{
   unsigned i;
   bool ef = ctx->edgeflag.value;
   if (ctx->edgeflag.width == 1)
      for (i = 0; i < n && ef_value_8(ctx, start++) == ef; ++i);
   else
      for (i = 0; i < n && ef_value_32(ctx, start++) == ef; ++i);
   return i;
}
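/* NOTE (added): the ef_* helpers above let the dispatch loops draw runs of
 * vertices that share the same edge flag: ef_value_8/_32 read the flag of
 * one vertex (1-byte vs. 4-byte source formats), the ef_toggle_search_*
 * variants return the length of the run still matching the current value,
 * and ef_toggle() flips the value to program through the EDGEFLAG method
 * between runs, since edge flags are global state rather than a fetched
 * vertex attribute on this path.
 */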
static void *
nvc0_push_setup_vertex_array(struct nvc0_context *nvc0, const unsigned count)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nouveau_bo *bo;
   uint64_t va;
   const unsigned size = count * nvc0->vertex->size;

   void *const dest = nouveau_scratch_get(&nvc0->base, size, &va, &bo);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_START_HIGH(0)), 2);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);

   if (nvc0->screen->eng3d->oclass < TU102_3D_CLASS)
      BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   else
      BEGIN_NVC0(push, SUBC_3D(TU102_3D_VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
   PUSH_DATAh(push, va + size - 1);
   PUSH_DATA (push, va + size - 1);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   return dest;
}
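/* After this setup, vertex array 0 points at `size` bytes of GART-visible
 * scratch memory: the disp_vertices_* functions stream translated vertices
 * into the returned pointer with the CPU and then reference them by position
 * through VERTEX_BUFFER_FIRST/COUNT or VB_ELEMENT_U32. The LIMIT register is
 * an inclusive bound, hence va + size - 1.
 */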
static void
disp_vertices_i08(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint8_t *restrict elts = (uint8_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i08(elts, nR, ctx->restart_index);

      translate->run_elts8(translate, elts, nR,
                           ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i08(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
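/* disp_vertices_i16() and disp_vertices_i32() below follow the same pattern
 * for 16- and 32-bit indices: translate a run up to the next restart index,
 * emit it as FIRST/COUNT ranges (split further wherever the edge flag
 * toggles), then send the restart itself as a ~0 index through
 * VB_ELEMENT_U32 while skipping one slot in the scratch vertex buffer.
 */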
static void
disp_vertices_i16(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint16_t *restrict elts = (uint16_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i16(elts, nR, ctx->restart_index);

      translate->run_elts16(translate, elts, nR,
                            ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i16(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
static void
disp_vertices_i32(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   const uint32_t *restrict elts = (uint32_t *)ctx->idxbuf + start;
   unsigned pos = 0;

   do {
      unsigned nR = count;

      if (unlikely(ctx->prim_restart))
         nR = prim_restart_search_i32(elts, nR, ctx->restart_index);

      translate->run_elts(translate, elts, nR,
                          ctx->start_instance, ctx->instance_id, ctx->dest);
      count -= nR;
      ctx->dest += nR * ctx->vertex_size;

      while (nR) {
         unsigned nE = nR;

         if (unlikely(ctx->edgeflag.enabled))
            nE = ef_toggle_search_i32(ctx, elts, nR);

         PUSH_SPACE(push, 4);
         if (likely(nE >= 2)) {
            BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
            PUSH_DATA (push, pos);
            PUSH_DATA (push, nE);
         } else
         if (nE) {
            if (pos <= 0xff) {
               IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_U32), pos);
            } else {
               BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
               PUSH_DATA (push, pos);
            }
         }
         if (unlikely(nE != nR))
            IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

         pos += nE;
         elts += nE;
         nR -= nE;
      }
      if (count) {
         BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_U32), 1);
         PUSH_DATA (push, 0xffffffff);
         ++elts;
         ctx->dest += ctx->vertex_size;
         ++pos;
         --count;
      }
   } while (count);
}
static void
disp_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct translate *translate = ctx->translate;
   unsigned pos = 0;

   /* XXX: This will read the data corresponding to the primitive restart
    * index, maybe we should avoid that ?
    */
   translate->run(translate, start, count,
                  ctx->start_instance, ctx->instance_id, ctx->dest);
   do {
      unsigned nr = count;

      if (unlikely(ctx->edgeflag.enabled))
         nr = ef_toggle_search_seq(ctx, start + pos, nr);

      PUSH_SPACE(push, 4);
      if (likely(nr)) {
         BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
         PUSH_DATA (push, pos);
         PUSH_DATA (push, nr);
      }
      if (unlikely(nr != count))
         IMMED_NVC0(push, NVC0_3D(EDGEFLAG), ef_toggle(ctx));

      pos += nr;
      count -= nr;
   } while (count);
}
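/* Non-indexed variant: translate->run() converts the whole range up front
 * (including any position matching the restart index, see the XXX above),
 * so the loop only has to split the draw into FIRST/COUNT ranges at edge
 * flag transitions.
 */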
#define NVC0_PRIM_GL_CASE(n) \
   case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n

static inline unsigned
nvc0_prim_gl(unsigned prim)
{
   switch (prim) {
   NVC0_PRIM_GL_CASE(POINTS);
   NVC0_PRIM_GL_CASE(LINES);
   NVC0_PRIM_GL_CASE(LINE_LOOP);
   NVC0_PRIM_GL_CASE(LINE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLES);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP);
   NVC0_PRIM_GL_CASE(TRIANGLE_FAN);
   NVC0_PRIM_GL_CASE(QUADS);
   NVC0_PRIM_GL_CASE(QUAD_STRIP);
   NVC0_PRIM_GL_CASE(POLYGON);
   NVC0_PRIM_GL_CASE(LINES_ADJACENCY);
   NVC0_PRIM_GL_CASE(LINE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLES_ADJACENCY);
   NVC0_PRIM_GL_CASE(TRIANGLE_STRIP_ADJACENCY);
   NVC0_PRIM_GL_CASE(PATCHES);
   default:
      return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_POINTS;
   }
}
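/* The Gallium PIPE_PRIM_* values map one-to-one onto the hardware's
 * VERTEX_BEGIN_GL primitive codes here; falling back to POINTS keeps the
 * function total for modes that should never reach this path.
 */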
typedef struct {
   uint32_t count;
   uint32_t primCount;
   uint32_t first;
   uint32_t baseInstance;
} DrawArraysIndirectCommand;

typedef struct {
   uint32_t count;
   uint32_t primCount;
   uint32_t firstIndex;
   int32_t  baseVertex;
   uint32_t baseInstance;
} DrawElementsIndirectCommand;
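/* These match the GL indirect command layouts (ARB_draw_indirect /
 * ARB_multi_draw_indirect), which is also what the Gallium indirect buffer
 * contains, so each record can be cast directly below while stepping by
 * info->indirect->stride.
 */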
void
nvc0_push_vbo_indirect(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   /* The strategy here is to just read the commands from the indirect buffer
    * and do the draws. This is suboptimal, but will only happen in the case
    * that conversion is required for FIXED or DOUBLE inputs.
    */
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nv04_resource *buf = nv04_resource(info->indirect->buffer);
   struct nv04_resource *buf_count = nv04_resource(info->indirect->indirect_draw_count);
   unsigned i;

   unsigned draw_count = info->indirect->draw_count;
   if (buf_count) {
      uint32_t *count = nouveau_resource_map_offset(
            &nvc0->base, buf_count, info->indirect->indirect_draw_count_offset,
            NOUVEAU_BO_RD);
      draw_count = *count;
   }

   uint8_t *buf_data = nouveau_resource_map_offset(
         &nvc0->base, buf, info->indirect->offset, NOUVEAU_BO_RD);
   struct pipe_draw_info single = *info;
   single.indirect = NULL;
   for (i = 0; i < draw_count; i++, buf_data += info->indirect->stride) {
      if (info->index_size) {
         DrawElementsIndirectCommand *cmd = (void *)buf_data;
         single.start = info->start + cmd->firstIndex;
         single.count = cmd->count;
         single.start_instance = cmd->baseInstance;
         single.instance_count = cmd->primCount;
         single.index_bias = cmd->baseVertex;
      } else {
         DrawArraysIndirectCommand *cmd = (void *)buf_data;
         single.start = cmd->first;
         single.count = cmd->count;
         single.start_instance = cmd->baseInstance;
         single.instance_count = cmd->primCount;
      }

      if (nvc0->vertprog->vp.need_draw_parameters) {
         PUSH_SPACE(push, 9);
         BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
         PUSH_DATA (push, NVC0_CB_AUX_SIZE);
         PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
         PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
         BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 3);
         PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO);
         PUSH_DATA (push, single.index_bias);
         PUSH_DATA (push, single.start_instance);
         PUSH_DATA (push, single.drawid + i);
      }

      nvc0_push_vbo(nvc0, &single);
   }

   nouveau_resource_unmap(buf);
   if (buf_count)
      nouveau_resource_unmap(buf_count);
}
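/* Rough flow (added note): e.g. a glMultiDrawElementsIndirect() whose
 * attributes need FIXED/DOUBLE conversion lands here; every record becomes
 * a plain pipe_draw_info fed back through nvc0_push_vbo(), and the draw
 * parameters (base vertex/instance, draw id) are uploaded to the aux
 * constant buffer by hand because the normal indirect draw path, which
 * would provide them, is bypassed.
 */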
void
nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
   struct push_context ctx;
   unsigned i, index_size;
   unsigned inst_count = info->instance_count;
   unsigned vert_count = info->count;
   unsigned prim;

   nvc0_push_context_init(nvc0, &ctx);

   nvc0_vertex_configure_translate(nvc0, info->index_bias);

   if (nvc0->state.index_bias) {
      /* this is already taken care of by translate */
      IMMED_NVC0(ctx.push, NVC0_3D(VB_ELEMENT_BASE), 0);
      nvc0->state.index_bias = 0;
   }

   if (unlikely(ctx.edgeflag.enabled))
      nvc0_push_map_edgeflag(&ctx, nvc0, info->index_bias);

   ctx.prim_restart = info->primitive_restart;
   ctx.restart_index = info->restart_index;

   if (info->primitive_restart) {
      /* NOTE: I hope we won't ever need that last index (~0).
       * If we do, we have to disable primitive restart here always and
       * use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
       * We could also deactivate PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
       * and add manual restart to disp_vertices_seq.
       */
      BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
      PUSH_DATA (ctx.push, 1);
      PUSH_DATA (ctx.push, info->index_size ? 0xffffffff : info->restart_index);
   } else
   if (nvc0->state.prim_restart) {
      IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
   }
   nvc0->state.prim_restart = info->primitive_restart;

   if (info->index_size) {
      nvc0_push_map_idxbuf(&ctx, nvc0, info);
      index_size = info->index_size;
   } else {
      if (unlikely(info->count_from_stream_output)) {
         struct pipe_context *pipe = &nvc0->base.pipe;
         struct nvc0_so_target *targ;
         targ = nvc0_so_target(info->count_from_stream_output);
         pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
         vert_count /= targ->stride;
      }
      ctx.idxbuf = NULL; /* shut up warnings */
      index_size = 0;
   }

   ctx.start_instance = info->start_instance;

   prim = nvc0_prim_gl(info->mode);
   do {
      PUSH_SPACE(ctx.push, 9);

      ctx.dest = nvc0_push_setup_vertex_array(nvc0, vert_count);
      if (unlikely(!ctx.dest))
         break;

      if (unlikely(ctx.need_vertex_id))
         nvc0_push_upload_vertex_ids(&ctx, nvc0, info);

      if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
         IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_BEGIN_GL), 1);
      PUSH_DATA (ctx.push, prim);
      switch (index_size) {
      case 1:
         disp_vertices_i08(&ctx, info->start, vert_count);
         break;
      case 2:
         disp_vertices_i16(&ctx, info->start, vert_count);
         break;
      case 4:
         disp_vertices_i32(&ctx, info->start, vert_count);
         break;
      default:
         assert(index_size == 0);
         disp_vertices_seq(&ctx, info->start, vert_count);
         break;
      }
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_END_GL), 0);

      if (--inst_count) {
         prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
         ++ctx.instance_id;
      }
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX_TMP);
      nouveau_scratch_done(&nvc0->base);
   } while (inst_count);

   /* reset state and unmap buffers (no-op) */

   if (unlikely(!ctx.edgeflag.value)) {
      PUSH_SPACE(ctx.push, 1);
      IMMED_NVC0(ctx.push, NVC0_3D(EDGEFLAG), 1);
   }

   if (unlikely(ctx.need_vertex_id)) {
      PUSH_SPACE(ctx.push, 4);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
      BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
      PUSH_DATA (ctx.push,
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
                 NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
      IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
   }

   if (info->index_size && !info->has_user_indices)
      nouveau_resource_unmap(nv04_resource(info->index.resource));
   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer.resource));

   NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}
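/* Summary of the fallback above (added note): configure translate with the
 * bound vertex buffers, then for each instance upload a freshly converted
 * copy of the vertices into scratch and emit one VERTEX_BEGIN_GL /
 * VERTEX_END_GL pair, setting INSTANCE_NEXT on every instance after the
 * first. Any state this path may have changed (EDGEFLAG, the vertex id
 * attribute/array) is restored before returning.
 */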
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u16(uint32_t *dst, const uint16_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}

static inline void
copy_indices_u32(uint32_t *dst, const uint32_t *elts, uint32_t bias, unsigned n)
{
   unsigned i;
   for (i = 0; i < n; ++i)
      dst[i] = elts[i] + bias;
}
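/* The copy_indices_* helpers fold index_bias into the indices on the CPU;
 * dst is always uint32_t because nvc0_push_upload_vertex_ids() below forces
 * a 4-byte id buffer whenever a bias is applied.
 */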
static void
nvc0_push_upload_vertex_ids(struct push_context *ctx,
                            struct nvc0_context *nvc0,
                            const struct pipe_draw_info *info)
{
   struct nouveau_pushbuf *push = ctx->push;
   struct nouveau_bo *bo;
   uint64_t va;
   uint32_t *data;
   uint32_t format;
   unsigned index_size = info->index_size;
   unsigned i;
   unsigned a = nvc0->vertex->num_elements;

   if (!index_size || info->index_bias)
      index_size = 4;
   data = (uint32_t *)nouveau_scratch_get(&nvc0->base,
                                          info->count * index_size, &va, &bo);

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                bo);
   nouveau_pushbuf_validate(push);

   if (info->index_size) {
      if (!info->index_bias) {
         memcpy(data, ctx->idxbuf, info->count * index_size);
      } else {
         switch (info->index_size) {
         case 1:
            copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         case 2:
            copy_indices_u16(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         default:
            copy_indices_u32(data, ctx->idxbuf, info->index_bias, info->count);
            break;
         }
      }
   } else {
      for (i = 0; i < info->count; ++i)
         data[i] = i + (info->start + info->index_bias);
   }

   format = (1 << NVC0_3D_VERTEX_ATTRIB_FORMAT_BUFFER__SHIFT) |
      NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_UINT;

   switch (index_size) {
   case 1:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_8;
      break;
   case 2:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_16;
      break;
   default:
      format |= NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32;
      break;
   }

   PUSH_SPACE(push, 12);

   if (unlikely(nvc0->state.instance_elts & 2)) {
      nvc0->state.instance_elts &= ~2;
      IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_PER_INSTANCE(1)), 0);
   }

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(a)), 1);
   PUSH_DATA (push, format);

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 3);
   PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | index_size);
   PUSH_DATAh(push, va);
   PUSH_DATA (push, va);

   if (nvc0->screen->eng3d->oclass < TU102_3D_CLASS)
      BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   else
      BEGIN_NVC0(push, SUBC_3D(TU102_3D_VERTEX_ARRAY_LIMIT_HIGH(1)), 2);
   PUSH_DATAh(push, va + info->count * index_size - 1);
   PUSH_DATA (push, va + info->count * index_size - 1);

#define NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) \
   (((0x80 + (a) * 0x10) / 4) << NVC0_3D_VERTEX_ID_REPLACE_SOURCE__SHIFT)

   BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_REPLACE), 1);
   PUSH_DATA (push, NVC0_3D_VERTEX_ID_REPLACE_SOURCE_ATTR_X(a) | 1);
}
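/* With this setup, dummy attribute <a> fetches the uploaded index/id stream,
 * and VERTEX_ID_REPLACE (| 1 = enable) makes the shader's vertex id read
 * from that attribute, so reordered/translated vertices still observe their
 * original ids. nvc0_push_vbo() turns VERTEX_ID_REPLACE back off once the
 * draw is done.
 */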