/*
 * Copyright 2008 Ben Skeggs
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
23 #include "pipe/p_context.h"
24 #include "pipe/p_state.h"
25 #include "pipe/p_inlines.h"
27 #include "nv50_context.h"
30 nv50_push_elements_u08(struct nv50_context
*, uint8_t *, unsigned);
33 nv50_push_elements_u16(struct nv50_context
*, uint16_t *, unsigned);
36 nv50_push_elements_u32(struct nv50_context
*, uint32_t *, unsigned);
39 nv50_push_arrays(struct nv50_context
*, unsigned, unsigned);
41 static INLINE
unsigned
42 nv50_prim(unsigned mode
)
45 case PIPE_PRIM_POINTS
: return NV50TCL_VERTEX_BEGIN_POINTS
;
46 case PIPE_PRIM_LINES
: return NV50TCL_VERTEX_BEGIN_LINES
;
47 case PIPE_PRIM_LINE_LOOP
: return NV50TCL_VERTEX_BEGIN_LINE_LOOP
;
48 case PIPE_PRIM_LINE_STRIP
: return NV50TCL_VERTEX_BEGIN_LINE_STRIP
;
49 case PIPE_PRIM_TRIANGLES
: return NV50TCL_VERTEX_BEGIN_TRIANGLES
;
50 case PIPE_PRIM_TRIANGLE_STRIP
:
51 return NV50TCL_VERTEX_BEGIN_TRIANGLE_STRIP
;
52 case PIPE_PRIM_TRIANGLE_FAN
: return NV50TCL_VERTEX_BEGIN_TRIANGLE_FAN
;
53 case PIPE_PRIM_QUADS
: return NV50TCL_VERTEX_BEGIN_QUADS
;
54 case PIPE_PRIM_QUAD_STRIP
: return NV50TCL_VERTEX_BEGIN_QUAD_STRIP
;
55 case PIPE_PRIM_POLYGON
: return NV50TCL_VERTEX_BEGIN_POLYGON
;
60 NOUVEAU_ERR("invalid primitive type %d\n", mode
);
61 return NV50TCL_VERTEX_BEGIN_POINTS
;
64 static INLINE
uint32_t
65 nv50_vbo_type_to_hw(unsigned type
)
68 case PIPE_FORMAT_TYPE_FLOAT
:
69 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_FLOAT
;
70 case PIPE_FORMAT_TYPE_UNORM
:
71 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UNORM
;
72 case PIPE_FORMAT_TYPE_SNORM
:
73 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SNORM
;
74 case PIPE_FORMAT_TYPE_USCALED
:
75 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_USCALED
;
76 case PIPE_FORMAT_TYPE_SSCALED
:
77 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SSCALED
;
79 case PIPE_FORMAT_TYPE_UINT:
80 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UINT;
81 case PIPE_FORMAT_TYPE_SINT:
82 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SINT; */
88 static INLINE
uint32_t
89 nv50_vbo_size_to_hw(unsigned size
, unsigned nr_c
)
91 static const uint32_t hw_values
[] = {
93 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_8
,
94 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_8_8
,
95 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_8_8_8
,
96 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_8_8_8_8
,
97 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_16
,
98 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_16_16
,
99 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_16_16_16
,
100 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_16_16_16_16
,
102 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_32
,
103 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_32_32
,
104 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_32_32_32
,
105 NV50TCL_VERTEX_ARRAY_ATTRIB_SIZE_32_32_32_32
};
107 /* we'd also have R11G11B10 and R10G10B10A2 */
109 assert(nr_c
> 0 && nr_c
<= 4);
115 return hw_values
[size
+ (nr_c
- 1)];
118 static INLINE
uint32_t
119 nv50_vbo_vtxelt_to_hw(struct pipe_vertex_element
*ve
)
121 uint32_t hw_type
, hw_size
;
122 enum pipe_format pf
= ve
->src_format
;
123 unsigned size
= pf_size_x(pf
) << pf_exp2(pf
);
125 hw_type
= nv50_vbo_type_to_hw(pf_type(pf
));
126 hw_size
= nv50_vbo_size_to_hw(size
, ve
->nr_components
);
128 if (!hw_type
|| !hw_size
) {
129 NOUVEAU_ERR("unsupported vbo format: %s\n", pf_name(pf
));
134 if (pf_swizzle_x(pf
) == 2) /* BGRA */
135 hw_size
|= (1 << 31); /* no real swizzle bits :-( */
137 return (hw_type
| hw_size
);
141 nv50_draw_arrays(struct pipe_context
*pipe
, unsigned mode
, unsigned start
,
144 struct nv50_context
*nv50
= nv50_context(pipe
);
145 struct nouveau_channel
*chan
= nv50
->screen
->tesla
->channel
;
146 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
149 nv50_state_validate(nv50
);
151 BEGIN_RING(chan
, tesla
, 0x142c, 1);
153 BEGIN_RING(chan
, tesla
, 0x142c, 1);
156 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_BEGIN
, 1);
157 OUT_RING (chan
, nv50_prim(mode
));
160 ret
= nv50_push_arrays(nv50
, start
, count
);
162 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_BUFFER_FIRST
, 2);
163 OUT_RING (chan
, start
);
164 OUT_RING (chan
, count
);
167 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_END
, 1);
173 static INLINE boolean
174 nv50_draw_elements_inline_u08(struct nv50_context
*nv50
, uint8_t *map
,
175 unsigned start
, unsigned count
)
177 struct nouveau_channel
*chan
= nv50
->screen
->tesla
->channel
;
178 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
183 return nv50_push_elements_u08(nv50
, map
, count
);
186 BEGIN_RING(chan
, tesla
, 0x15e8, 1);
187 OUT_RING (chan
, map
[0]);
193 unsigned nr
= count
> 2046 ? 2046 : count
;
196 BEGIN_RING(chan
, tesla
, 0x400015f0, nr
>> 1);
197 for (i
= 0; i
< nr
; i
+= 2)
198 OUT_RING (chan
, (map
[i
+ 1] << 16) | map
[i
]);
206 static INLINE boolean
207 nv50_draw_elements_inline_u16(struct nv50_context
*nv50
, uint16_t *map
,
208 unsigned start
, unsigned count
)
210 struct nouveau_channel
*chan
= nv50
->screen
->tesla
->channel
;
211 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
216 return nv50_push_elements_u16(nv50
, map
, count
);
219 BEGIN_RING(chan
, tesla
, 0x15e8, 1);
220 OUT_RING (chan
, map
[0]);
226 unsigned nr
= count
> 2046 ? 2046 : count
;
229 BEGIN_RING(chan
, tesla
, 0x400015f0, nr
>> 1);
230 for (i
= 0; i
< nr
; i
+= 2)
231 OUT_RING (chan
, (map
[i
+ 1] << 16) | map
[i
]);
239 static INLINE boolean
240 nv50_draw_elements_inline_u32(struct nv50_context
*nv50
, uint32_t *map
,
241 unsigned start
, unsigned count
)
243 struct nouveau_channel
*chan
= nv50
->screen
->tesla
->channel
;
244 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
249 return nv50_push_elements_u32(nv50
, map
, count
);
252 unsigned nr
= count
> 2047 ? 2047 : count
;
254 BEGIN_RING(chan
, tesla
, 0x400015e8, nr
);
255 OUT_RINGp (chan
, map
, nr
);
264 nv50_draw_elements(struct pipe_context
*pipe
,
265 struct pipe_buffer
*indexBuffer
, unsigned indexSize
,
266 unsigned mode
, unsigned start
, unsigned count
)
268 struct nv50_context
*nv50
= nv50_context(pipe
);
269 struct nouveau_channel
*chan
= nv50
->screen
->tesla
->channel
;
270 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
271 struct pipe_screen
*pscreen
= pipe
->screen
;
275 map
= pipe_buffer_map(pscreen
, indexBuffer
, PIPE_BUFFER_USAGE_CPU_READ
);
277 nv50_state_validate(nv50
);
279 BEGIN_RING(chan
, tesla
, 0x142c, 1);
281 BEGIN_RING(chan
, tesla
, 0x142c, 1);
284 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_BEGIN
, 1);
285 OUT_RING (chan
, nv50_prim(mode
));
288 ret
= nv50_draw_elements_inline_u08(nv50
, map
, start
, count
);
291 ret
= nv50_draw_elements_inline_u16(nv50
, map
, start
, count
);
294 ret
= nv50_draw_elements_inline_u32(nv50
, map
, start
, count
);
301 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_END
, 1);
304 pipe_buffer_unmap(pscreen
, indexBuffer
);
309 static INLINE boolean
310 nv50_vbo_static_attrib(struct nv50_context
*nv50
, unsigned attrib
,
311 struct nouveau_stateobj
**pso
,
312 struct pipe_vertex_element
*ve
,
313 struct pipe_vertex_buffer
*vb
)
316 struct nouveau_stateobj
*so
;
317 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
318 struct nouveau_bo
*bo
= nouveau_bo(vb
->buffer
);
321 enum pipe_format pf
= ve
->src_format
;
323 if ((pf_type(pf
) != PIPE_FORMAT_TYPE_FLOAT
) ||
324 (pf_size_x(pf
) << pf_exp2(pf
)) != 32)
327 ret
= nouveau_bo_map(bo
, NOUVEAU_BO_RD
);
330 v
= (float *)(bo
->map
+ (vb
->buffer_offset
+ ve
->src_offset
));
334 *pso
= so
= so_new(nv50
->vtxelt_nr
* 5, 0);
336 switch (ve
->nr_components
) {
338 so_method(so
, tesla
, NV50TCL_VTX_ATTR_4F_X(attrib
), 4);
339 so_data (so
, fui(v
[0]));
340 so_data (so
, fui(v
[1]));
341 so_data (so
, fui(v
[2]));
342 so_data (so
, fui(v
[3]));
345 so_method(so
, tesla
, NV50TCL_VTX_ATTR_3F_X(attrib
), 3);
346 so_data (so
, fui(v
[0]));
347 so_data (so
, fui(v
[1]));
348 so_data (so
, fui(v
[2]));
351 so_method(so
, tesla
, NV50TCL_VTX_ATTR_2F_X(attrib
), 2);
352 so_data (so
, fui(v
[0]));
353 so_data (so
, fui(v
[1]));
356 so_method(so
, tesla
, NV50TCL_VTX_ATTR_1F(attrib
), 1);
357 so_data (so
, fui(v
[0]));
360 nouveau_bo_unmap(bo
);
364 nouveau_bo_unmap(bo
);
369 nv50_vbo_validate(struct nv50_context
*nv50
)
371 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
372 struct nouveau_stateobj
*vtxbuf
, *vtxfmt
, *vtxattr
;
375 /* don't validate if Gallium took away our buffers */
376 if (nv50
->vtxbuf_nr
== 0)
380 for (i
= 0; i
< nv50
->vtxbuf_nr
; ++i
)
381 if (nv50
->vtxbuf
[i
].stride
&&
382 !(nv50
->vtxbuf
[i
].buffer
->usage
& PIPE_BUFFER_USAGE_VERTEX
))
383 nv50
->vbo_fifo
= 0xffff;
385 n_ve
= MAX2(nv50
->vtxelt_nr
, nv50
->state
.vtxelt_nr
);
388 vtxbuf
= so_new(n_ve
* 7, nv50
->vtxelt_nr
* 4);
389 vtxfmt
= so_new(n_ve
+ 1, 0);
390 so_method(vtxfmt
, tesla
, NV50TCL_VERTEX_ARRAY_ATTRIB(0), n_ve
);
392 for (i
= 0; i
< nv50
->vtxelt_nr
; i
++) {
393 struct pipe_vertex_element
*ve
= &nv50
->vtxelt
[i
];
394 struct pipe_vertex_buffer
*vb
=
395 &nv50
->vtxbuf
[ve
->vertex_buffer_index
];
396 struct nouveau_bo
*bo
= nouveau_bo(vb
->buffer
);
397 uint32_t hw
= nv50_vbo_vtxelt_to_hw(ve
);
400 nv50_vbo_static_attrib(nv50
, i
, &vtxattr
, ve
, vb
)) {
401 so_data(vtxfmt
, hw
| (1 << 4));
403 so_method(vtxbuf
, tesla
,
404 NV50TCL_VERTEX_ARRAY_FORMAT(i
), 1);
407 nv50
->vbo_fifo
&= ~(1 << i
);
410 so_data(vtxfmt
, hw
| i
);
412 if (nv50
->vbo_fifo
) {
413 so_method(vtxbuf
, tesla
,
414 NV50TCL_VERTEX_ARRAY_FORMAT(i
), 1);
419 so_method(vtxbuf
, tesla
, NV50TCL_VERTEX_ARRAY_FORMAT(i
), 3);
420 so_data (vtxbuf
, 0x20000000 | vb
->stride
);
421 so_reloc (vtxbuf
, bo
, vb
->buffer_offset
+
422 ve
->src_offset
, NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
|
423 NOUVEAU_BO_RD
| NOUVEAU_BO_HIGH
, 0, 0);
424 so_reloc (vtxbuf
, bo
, vb
->buffer_offset
+
425 ve
->src_offset
, NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
|
426 NOUVEAU_BO_RD
| NOUVEAU_BO_LOW
, 0, 0);
428 /* vertex array limits */
429 so_method(vtxbuf
, tesla
, 0x1080 + (i
* 8), 2);
430 so_reloc (vtxbuf
, bo
, vb
->buffer
->size
- 1,
431 NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
| NOUVEAU_BO_RD
|
432 NOUVEAU_BO_HIGH
, 0, 0);
433 so_reloc (vtxbuf
, bo
, vb
->buffer
->size
- 1,
434 NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
| NOUVEAU_BO_RD
|
435 NOUVEAU_BO_LOW
, 0, 0);
437 for (; i
< n_ve
; ++i
) {
438 so_data (vtxfmt
, 0x7e080010);
440 so_method(vtxbuf
, tesla
, NV50TCL_VERTEX_ARRAY_FORMAT(i
), 1);
443 nv50
->state
.vtxelt_nr
= nv50
->vtxelt_nr
;
445 so_ref (vtxfmt
, &nv50
->state
.vtxfmt
);
446 so_ref (vtxbuf
, &nv50
->state
.vtxbuf
);
447 so_ref (vtxattr
, &nv50
->state
.vtxattr
);
448 so_ref (NULL
, &vtxbuf
);
449 so_ref (NULL
, &vtxfmt
);
450 so_ref (NULL
, &vtxattr
);
/* Per-attribute emit callback: writes one vertex's worth of data for a
 * single attribute into the command stream. */
typedef void (*pfn_push)(struct nouveau_channel *, void *);

/* Immediate-mode emission context, filled by emit_prepare().
 * NOTE(review): struct body reconstructed from field usage in this file —
 * confirm against the original header/source. */
struct nv50_vbo_emitctx
{
	pfn_push push[16];    /* per-attribute emit callback */
	void *map[16];        /* CPU pointer to attribute data (advanced per vtx) */
	unsigned stride[16];  /* byte stride between vertices per attribute */
	unsigned nr_ve;       /* number of active attributes */
	unsigned vtx_dwords;  /* dwords emitted per vertex */
	unsigned vtx_max;     /* max vertices per VERTEX_DATA packet */
};
466 emit_vtx_next(struct nouveau_channel
*chan
, struct nv50_vbo_emitctx
*emit
)
470 for (i
= 0; i
< emit
->nr_ve
; ++i
) {
471 emit
->push
[i
](chan
, emit
->map
[i
]);
472 emit
->map
[i
] += emit
->stride
[i
];
477 emit_vtx(struct nouveau_channel
*chan
, struct nv50_vbo_emitctx
*emit
,
482 for (i
= 0; i
< emit
->nr_ve
; ++i
)
483 emit
->push
[i
](chan
, emit
->map
[i
] + emit
->stride
[i
] * vi
);
486 static INLINE boolean
487 nv50_map_vbufs(struct nv50_context
*nv50
)
491 for (i
= 0; i
< nv50
->vtxbuf_nr
; ++i
) {
492 struct pipe_vertex_buffer
*vb
= &nv50
->vtxbuf
[i
];
493 unsigned size
, delta
;
495 if (nouveau_bo(vb
->buffer
)->map
)
498 size
= vb
->stride
* (vb
->max_index
+ 1);
499 delta
= vb
->buffer_offset
;
502 size
= vb
->buffer
->size
- vb
->buffer_offset
;
504 if (nouveau_bo_map_range(nouveau_bo(vb
->buffer
),
505 delta
, size
, NOUVEAU_BO_RD
))
509 if (i
== nv50
->vtxbuf_nr
)
512 nouveau_bo_unmap(nouveau_bo(nv50
->vtxbuf
[i
].buffer
));
517 nv50_unmap_vbufs(struct nv50_context
*nv50
)
521 for (i
= 0; i
< nv50
->vtxbuf_nr
; ++i
)
522 if (nouveau_bo(nv50
->vtxbuf
[i
].buffer
)->map
)
523 nouveau_bo_unmap(nouveau_bo(nv50
->vtxbuf
[i
].buffer
));
/* Push one 32-bit component as one dword. */
static void
emit_b32_1(struct nouveau_channel *chan, void *data)
{
	uint32_t *v = data;

	OUT_RING(chan, v[0]);
}
/* Push two 32-bit components as two dwords. */
static void
emit_b32_2(struct nouveau_channel *chan, void *data)
{
	uint32_t *v = data;

	OUT_RING(chan, v[0]);
	OUT_RING(chan, v[1]);
}
/* Push three 32-bit components as three dwords. */
static void
emit_b32_3(struct nouveau_channel *chan, void *data)
{
	uint32_t *v = data;

	OUT_RING(chan, v[0]);
	OUT_RING(chan, v[1]);
	OUT_RING(chan, v[2]);
}
/* Push four 32-bit components as four dwords. */
static void
emit_b32_4(struct nouveau_channel *chan, void *data)
{
	uint32_t *v = data;

	OUT_RING(chan, v[0]);
	OUT_RING(chan, v[1]);
	OUT_RING(chan, v[2]);
	OUT_RING(chan, v[3]);
}
/* Push one 16-bit component, zero-extended into one dword. */
static void
emit_b16_1(struct nouveau_channel *chan, void *data)
{
	uint16_t *v = data;

	OUT_RING(chan, v[0]);
}
/* Push three 16-bit components packed into two dwords (pair, then the
 * third component alone in the low half of the second dword). */
static void
emit_b16_3(struct nouveau_channel *chan, void *data)
{
	uint16_t *v = data;

	OUT_RING(chan, (v[1] << 16) | v[0]);
	OUT_RING(chan, v[2]);
}
/* Push one 8-bit component, zero-extended into one dword. */
static void
emit_b08_1(struct nouveau_channel *chan, void *data)
{
	uint8_t *v = data;

	OUT_RING(chan, v[0]);
}
/* Push three 8-bit components packed into the low 24 bits of one dword. */
static void
emit_b08_3(struct nouveau_channel *chan, void *data)
{
	uint8_t *v = data;

	OUT_RING(chan, (v[2] << 16) | (v[1] << 8) | v[0]);
}
598 emit_prepare(struct nv50_context
*nv50
, struct nv50_vbo_emitctx
*emit
,
603 if (nv50_map_vbufs(nv50
) == FALSE
)
607 emit
->vtx_dwords
= 0;
609 for (i
= 0; i
< nv50
->vtxelt_nr
; ++i
) {
610 struct pipe_vertex_element
*ve
;
611 struct pipe_vertex_buffer
*vb
;
612 unsigned n
, type
, size
;
614 ve
= &nv50
->vtxelt
[i
];
615 vb
= &nv50
->vtxbuf
[ve
->vertex_buffer_index
];
616 if (!(nv50
->vbo_fifo
& (1 << i
)))
620 emit
->stride
[n
] = vb
->stride
;
621 emit
->map
[n
] = nouveau_bo(vb
->buffer
)->map
+
622 (start
* vb
->stride
+ ve
->src_offset
);
624 type
= pf_type(ve
->src_format
);
625 size
= pf_size_x(ve
->src_format
) << pf_exp2(ve
->src_format
);
627 assert(ve
->nr_components
> 0 && ve
->nr_components
<= 4);
629 /* It shouldn't be necessary to push the implicit 1s
630 * for case 3 and size 8 cases 1, 2, 3.
634 NOUVEAU_ERR("unsupported vtxelt size: %u\n", size
);
637 switch (ve
->nr_components
) {
638 case 1: emit
->push
[n
] = emit_b32_1
; break;
639 case 2: emit
->push
[n
] = emit_b32_2
; break;
640 case 3: emit
->push
[n
] = emit_b32_3
; break;
641 case 4: emit
->push
[n
] = emit_b32_4
; break;
643 emit
->vtx_dwords
+= ve
->nr_components
;
646 switch (ve
->nr_components
) {
647 case 1: emit
->push
[n
] = emit_b16_1
; break;
648 case 2: emit
->push
[n
] = emit_b32_1
; break;
649 case 3: emit
->push
[n
] = emit_b16_3
; break;
650 case 4: emit
->push
[n
] = emit_b32_2
; break;
652 emit
->vtx_dwords
+= (ve
->nr_components
+ 1) >> 1;
655 switch (ve
->nr_components
) {
656 case 1: emit
->push
[n
] = emit_b08_1
; break;
657 case 2: emit
->push
[n
] = emit_b16_1
; break;
658 case 3: emit
->push
[n
] = emit_b08_3
; break;
659 case 4: emit
->push
[n
] = emit_b32_1
; break;
661 emit
->vtx_dwords
+= 1;
666 emit
->vtx_max
= 512 / emit
->vtx_dwords
;
672 nv50_push_arrays(struct nv50_context
*nv50
, unsigned start
, unsigned count
)
674 struct nouveau_channel
*chan
= nv50
->screen
->base
.channel
;
675 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
676 struct nv50_vbo_emitctx emit
;
678 if (emit_prepare(nv50
, &emit
, start
) == FALSE
)
682 unsigned i
, dw
, nr
= MIN2(count
, emit
.vtx_max
);
683 dw
= nr
* emit
.vtx_dwords
;
685 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_DATA
| 0x40000000, dw
);
686 for (i
= 0; i
< nr
; ++i
)
687 emit_vtx_next(chan
, &emit
);
691 nv50_unmap_vbufs(nv50
);
697 nv50_push_elements_u32(struct nv50_context
*nv50
, uint32_t *map
, unsigned count
)
699 struct nouveau_channel
*chan
= nv50
->screen
->base
.channel
;
700 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
701 struct nv50_vbo_emitctx emit
;
703 if (emit_prepare(nv50
, &emit
, 0) == FALSE
)
707 unsigned i
, dw
, nr
= MIN2(count
, emit
.vtx_max
);
708 dw
= nr
* emit
.vtx_dwords
;
710 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_DATA
| 0x40000000, dw
);
711 for (i
= 0; i
< nr
; ++i
)
712 emit_vtx(chan
, &emit
, *map
++);
716 nv50_unmap_vbufs(nv50
);
722 nv50_push_elements_u16(struct nv50_context
*nv50
, uint16_t *map
, unsigned count
)
724 struct nouveau_channel
*chan
= nv50
->screen
->base
.channel
;
725 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
726 struct nv50_vbo_emitctx emit
;
728 if (emit_prepare(nv50
, &emit
, 0) == FALSE
)
732 unsigned i
, dw
, nr
= MIN2(count
, emit
.vtx_max
);
733 dw
= nr
* emit
.vtx_dwords
;
735 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_DATA
| 0x40000000, dw
);
736 for (i
= 0; i
< nr
; ++i
)
737 emit_vtx(chan
, &emit
, *map
++);
741 nv50_unmap_vbufs(nv50
);
747 nv50_push_elements_u08(struct nv50_context
*nv50
, uint8_t *map
, unsigned count
)
749 struct nouveau_channel
*chan
= nv50
->screen
->base
.channel
;
750 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
751 struct nv50_vbo_emitctx emit
;
753 if (emit_prepare(nv50
, &emit
, 0) == FALSE
)
757 unsigned i
, dw
, nr
= MIN2(count
, emit
.vtx_max
);
758 dw
= nr
* emit
.vtx_dwords
;
760 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_DATA
| 0x40000000, dw
);
761 for (i
= 0; i
< nr
; ++i
)
762 emit_vtx(chan
, &emit
, *map
++);
766 nv50_unmap_vbufs(nv50
);