/*
 * Copyright 2008 Ben Skeggs
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_split_prim.h"

#include "nouveau/nouveau_util.h"
#include "nv50_context.h"
31 static INLINE
uint32_t
32 nv50_vbo_type_to_hw(enum pipe_format format
)
34 const struct util_format_description
*desc
;
36 desc
= util_format_description(format
);
39 switch (desc
->channel
[0].type
) {
40 case UTIL_FORMAT_TYPE_FLOAT
:
41 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_FLOAT
;
42 case UTIL_FORMAT_TYPE_UNSIGNED
:
43 if (desc
->channel
[0].normalized
) {
44 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UNORM
;
46 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_USCALED
;
47 case UTIL_FORMAT_TYPE_SIGNED
:
48 if (desc
->channel
[0].normalized
) {
49 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SNORM
;
51 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SSCALED
;
53 case PIPE_FORMAT_TYPE_UINT:
54 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_UINT;
55 case PIPE_FORMAT_TYPE_SINT:
56 return NV50TCL_VERTEX_ARRAY_ATTRIB_TYPE_SINT; */
62 static INLINE
uint32_t
63 nv50_vbo_size_to_hw(unsigned size
, unsigned nr_c
)
65 static const uint32_t hw_values
[] = {
67 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8
,
68 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8
,
69 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8
,
70 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_8_8_8_8
,
71 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16
,
72 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16
,
73 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16
,
74 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_16_16_16_16
,
76 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32
,
77 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32
,
78 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32
,
79 NV50TCL_VERTEX_ARRAY_ATTRIB_FORMAT_32_32_32_32
};
81 /* we'd also have R11G11B10 and R10G10B10A2 */
83 assert(nr_c
> 0 && nr_c
<= 4);
89 return hw_values
[size
+ (nr_c
- 1)];
92 static INLINE
uint32_t
93 nv50_vbo_vtxelt_to_hw(struct pipe_vertex_element
*ve
)
95 uint32_t hw_type
, hw_size
;
96 enum pipe_format pf
= ve
->src_format
;
97 const struct util_format_description
*desc
;
98 unsigned size
, nr_components
;
100 desc
= util_format_description(pf
);
103 size
= util_format_get_component_bits(pf
, UTIL_FORMAT_COLORSPACE_RGB
, 0);
104 nr_components
= util_format_get_nr_components(pf
);
106 hw_type
= nv50_vbo_type_to_hw(pf
);
107 hw_size
= nv50_vbo_size_to_hw(size
, nr_components
);
109 if (!hw_type
|| !hw_size
) {
110 NOUVEAU_ERR("unsupported vbo format: %s\n", util_format_name(pf
));
115 if (desc
->swizzle
[0] == UTIL_FORMAT_SWIZZLE_Z
) /* BGRA */
116 hw_size
|= (1 << 31); /* no real swizzle bits :-( */
118 return (hw_type
| hw_size
);
/* Per-vertex-element instancing state, filled by instance_init() and
 * advanced once per instance by instance_step().
 * Fields reconstructed from their use in instance_init/instance_step.
 */
struct instance {
	struct nouveau_bo *bo;  /* backing buffer of the vertex array */
	unsigned delta;         /* current byte offset into bo */
	unsigned stride;        /* per-index stride, advanced by divisor */
	unsigned step;          /* instances consumed since last advance */
	unsigned divisor;       /* 0 = not instanced per-element */
};
130 instance_init(struct nv50_context
*nv50
, struct instance
*a
, unsigned first
)
134 for (i
= 0; i
< nv50
->vtxelt
->num_elements
; i
++) {
135 struct pipe_vertex_element
*ve
= &nv50
->vtxelt
->pipe
[i
];
136 struct pipe_vertex_buffer
*vb
;
138 a
[i
].divisor
= ve
->instance_divisor
;
140 vb
= &nv50
->vtxbuf
[ve
->vertex_buffer_index
];
142 a
[i
].bo
= nouveau_bo(vb
->buffer
);
143 a
[i
].stride
= vb
->stride
;
144 a
[i
].step
= first
% a
[i
].divisor
;
145 a
[i
].delta
= vb
->buffer_offset
+ ve
->src_offset
+
146 (first
* a
[i
].stride
);
152 instance_step(struct nv50_context
*nv50
, struct instance
*a
)
154 struct nouveau_channel
*chan
= nv50
->screen
->tesla
->channel
;
155 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
158 for (i
= 0; i
< nv50
->vtxelt
->num_elements
; i
++) {
162 BEGIN_RING(chan
, tesla
,
163 NV50TCL_VERTEX_ARRAY_START_HIGH(i
), 2);
164 OUT_RELOCh(chan
, a
[i
].bo
, a
[i
].delta
, NOUVEAU_BO_RD
|
165 NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
);
166 OUT_RELOCl(chan
, a
[i
].bo
, a
[i
].delta
, NOUVEAU_BO_RD
|
167 NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
);
168 if (++a
[i
].step
== a
[i
].divisor
) {
170 a
[i
].delta
+= a
[i
].stride
;
176 nv50_draw_arrays_instanced(struct pipe_context
*pipe
,
177 unsigned mode
, unsigned start
, unsigned count
,
178 unsigned startInstance
, unsigned instanceCount
)
180 struct nv50_context
*nv50
= nv50_context(pipe
);
181 struct nouveau_channel
*chan
= nv50
->screen
->tesla
->channel
;
182 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
183 struct instance a
[16];
184 unsigned prim
= nv50_prim(mode
);
186 instance_init(nv50
, a
, startInstance
);
187 if (!nv50_state_validate(nv50
, 10 + 16*3))
190 if (nv50
->vbo_fifo
) {
191 nv50_push_elements_instanced(pipe
, NULL
, 0, mode
, start
,
192 count
, startInstance
,
197 BEGIN_RING(chan
, tesla
, NV50TCL_CB_ADDR
, 2);
198 OUT_RING (chan
, NV50_CB_AUX
| (24 << 8));
199 OUT_RING (chan
, startInstance
);
200 while (instanceCount
--) {
201 if (AVAIL_RING(chan
) < (7 + 16*3)) {
203 if (!nv50_state_validate(nv50
, 7 + 16*3)) {
208 instance_step(nv50
, a
);
210 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_BEGIN
, 1);
211 OUT_RING (chan
, prim
);
212 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_BUFFER_FIRST
, 2);
213 OUT_RING (chan
, start
);
214 OUT_RING (chan
, count
);
215 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_END
, 1);
/* Non-instanced draw: a single instance starting at instance 0. */
void
nv50_draw_arrays(struct pipe_context *pipe, unsigned mode, unsigned start,
		 unsigned count)
{
	nv50_draw_arrays_instanced(pipe, mode, start, count, 0, 1);
}
/* Context handed to the u_split_prim emit/edge callbacks: the nv50
 * context plus the CPU mapping of the index buffer.
 */
struct inline_ctx {
	struct nv50_context *nv50;
	void *map;  /* mapped index buffer contents */
};
235 inline_elt08(void *priv
, unsigned start
, unsigned count
)
237 struct inline_ctx
*ctx
= priv
;
238 struct nouveau_grobj
*tesla
= ctx
->nv50
->screen
->tesla
;
239 struct nouveau_channel
*chan
= tesla
->channel
;
240 uint8_t *map
= (uint8_t *)ctx
->map
+ start
;
243 BEGIN_RING(chan
, tesla
, NV50TCL_VB_ELEMENT_U32
, 1);
244 OUT_RING (chan
, map
[0]);
253 BEGIN_RING_NI(chan
, tesla
, NV50TCL_VB_ELEMENT_U16
, count
);
255 OUT_RING(chan
, (map
[1] << 16) | map
[0]);
261 inline_elt16(void *priv
, unsigned start
, unsigned count
)
263 struct inline_ctx
*ctx
= priv
;
264 struct nouveau_grobj
*tesla
= ctx
->nv50
->screen
->tesla
;
265 struct nouveau_channel
*chan
= tesla
->channel
;
266 uint16_t *map
= (uint16_t *)ctx
->map
+ start
;
269 BEGIN_RING(chan
, tesla
, NV50TCL_VB_ELEMENT_U32
, 1);
270 OUT_RING (chan
, map
[0]);
279 BEGIN_RING_NI(chan
, tesla
, NV50TCL_VB_ELEMENT_U16
, count
);
281 OUT_RING(chan
, (map
[1] << 16) | map
[0]);
287 inline_elt32(void *priv
, unsigned start
, unsigned count
)
289 struct inline_ctx
*ctx
= priv
;
290 struct nouveau_grobj
*tesla
= ctx
->nv50
->screen
->tesla
;
291 struct nouveau_channel
*chan
= tesla
->channel
;
293 BEGIN_RING_NI(chan
, tesla
, NV50TCL_VB_ELEMENT_U32
, count
);
294 OUT_RINGp (chan
, (uint32_t *)ctx
->map
+ start
, count
);
298 inline_edgeflag(void *priv
, boolean enabled
)
300 struct inline_ctx
*ctx
= priv
;
301 struct nouveau_grobj
*tesla
= ctx
->nv50
->screen
->tesla
;
302 struct nouveau_channel
*chan
= tesla
->channel
;
304 BEGIN_RING(chan
, tesla
, NV50TCL_EDGEFLAG_ENABLE
, 1);
305 OUT_RING (chan
, enabled
? 1 : 0);
309 nv50_draw_elements_inline(struct pipe_context
*pipe
,
310 struct pipe_buffer
*indexBuffer
, unsigned indexSize
,
311 unsigned mode
, unsigned start
, unsigned count
,
312 unsigned startInstance
, unsigned instanceCount
)
314 struct pipe_screen
*pscreen
= pipe
->screen
;
315 struct nv50_context
*nv50
= nv50_context(pipe
);
316 struct nouveau_channel
*chan
= nv50
->screen
->tesla
->channel
;
317 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
318 struct instance a
[16];
319 struct inline_ctx ctx
;
320 struct u_split_prim s
;
324 overhead
= 16*3; /* potential instance adjustments */
325 overhead
+= 4; /* Begin()/End() */
326 overhead
+= 4; /* potential edgeflag disable/reenable */
327 overhead
+= 3; /* potentially 3 VTX_ELT_U16/U32 packet headers */
331 s
.emit
= inline_elt08
;
334 s
.emit
= inline_elt16
;
336 s
.emit
= inline_elt32
;
337 s
.edge
= inline_edgeflag
;
340 ctx
.map
= pipe_buffer_map(pscreen
, indexBuffer
, PIPE_BUFFER_USAGE_CPU_READ
);
345 instance_init(nv50
, a
, startInstance
);
346 if (!nv50_state_validate(nv50
, overhead
+ 6 + 3))
349 BEGIN_RING(chan
, tesla
, NV50TCL_CB_ADDR
, 2);
350 OUT_RING (chan
, NV50_CB_AUX
| (24 << 8));
351 OUT_RING (chan
, startInstance
);
352 while (instanceCount
--) {
356 u_split_prim_init(&s
, mode
, start
, count
);
358 if (AVAIL_RING(chan
) < (overhead
+ 6)) {
360 if (!nv50_state_validate(nv50
, (overhead
+ 6))) {
366 max_verts
= AVAIL_RING(chan
) - overhead
;
367 if (max_verts
> 2047)
371 instance_step(nv50
, a
);
373 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_BEGIN
, 1);
374 OUT_RING (chan
, nv50_prim(s
.mode
) | (nzi
? (1<<28) : 0));
375 done
= u_split_prim_next(&s
, max_verts
);
376 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_END
, 1);
383 pipe_buffer_unmap(pscreen
, indexBuffer
);
387 nv50_draw_elements_instanced(struct pipe_context
*pipe
,
388 struct pipe_buffer
*indexBuffer
,
390 unsigned mode
, unsigned start
, unsigned count
,
391 unsigned startInstance
, unsigned instanceCount
)
393 struct nv50_context
*nv50
= nv50_context(pipe
);
394 struct nouveau_channel
*chan
= nv50
->screen
->tesla
->channel
;
395 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
396 struct instance a
[16];
397 unsigned prim
= nv50_prim(mode
);
399 instance_init(nv50
, a
, startInstance
);
400 if (!nv50_state_validate(nv50
, 13 + 16*3))
403 if (nv50
->vbo_fifo
) {
404 nv50_push_elements_instanced(pipe
, indexBuffer
, indexSize
,
405 mode
, start
, count
, startInstance
,
409 if (!(indexBuffer
->usage
& PIPE_BUFFER_USAGE_INDEX
) || indexSize
== 1) {
410 nv50_draw_elements_inline(pipe
, indexBuffer
, indexSize
,
411 mode
, start
, count
, startInstance
,
416 BEGIN_RING(chan
, tesla
, NV50TCL_CB_ADDR
, 2);
417 OUT_RING (chan
, NV50_CB_AUX
| (24 << 8));
418 OUT_RING (chan
, startInstance
);
419 while (instanceCount
--) {
420 if (AVAIL_RING(chan
) < (7 + 16*3)) {
422 if (!nv50_state_validate(nv50
, 10 + 16*3)) {
427 instance_step(nv50
, a
);
429 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_BEGIN
, 1);
430 OUT_RING (chan
, prim
);
431 if (indexSize
== 4) {
432 BEGIN_RING(chan
, tesla
, NV50TCL_VB_ELEMENT_U32
| 0x30000, 0);
433 OUT_RING (chan
, count
);
434 nouveau_pushbuf_submit(chan
, nouveau_bo(indexBuffer
),
435 start
<< 2, count
<< 2);
437 if (indexSize
== 2) {
438 unsigned vb_start
= (start
& ~1);
439 unsigned vb_end
= (start
+ count
+ 1) & ~1;
440 unsigned dwords
= (vb_end
- vb_start
) >> 1;
442 BEGIN_RING(chan
, tesla
, NV50TCL_VB_ELEMENT_U16_SETUP
, 1);
443 OUT_RING (chan
, ((start
& 1) << 31) | count
);
444 BEGIN_RING(chan
, tesla
, NV50TCL_VB_ELEMENT_U16
| 0x30000, 0);
445 OUT_RING (chan
, dwords
);
446 nouveau_pushbuf_submit(chan
, nouveau_bo(indexBuffer
),
447 vb_start
<< 1, dwords
<< 2);
448 BEGIN_RING(chan
, tesla
, NV50TCL_VB_ELEMENT_U16_SETUP
, 1);
451 BEGIN_RING(chan
, tesla
, NV50TCL_VERTEX_END
, 1);
/* Non-instanced indexed draw: a single instance starting at instance 0. */
void
nv50_draw_elements(struct pipe_context *pipe,
		   struct pipe_buffer *indexBuffer, unsigned indexSize,
		   unsigned mode, unsigned start, unsigned count)
{
	nv50_draw_elements_instanced(pipe, indexBuffer, indexSize,
				     mode, start, count, 0, 1);
}
467 static INLINE boolean
468 nv50_vbo_static_attrib(struct nv50_context
*nv50
, unsigned attrib
,
469 struct nouveau_stateobj
**pso
,
470 struct pipe_vertex_element
*ve
,
471 struct pipe_vertex_buffer
*vb
)
474 struct nouveau_stateobj
*so
;
475 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
476 struct nouveau_bo
*bo
= nouveau_bo(vb
->buffer
);
479 unsigned nr_components
= util_format_get_nr_components(ve
->src_format
);
481 ret
= nouveau_bo_map(bo
, NOUVEAU_BO_RD
);
485 util_format_read_4f(ve
->src_format
, v
, 0, (uint8_t *)bo
->map
+
486 (vb
->buffer_offset
+ ve
->src_offset
), 0,
490 *pso
= so
= so_new(nv50
->vtxelt
->num_elements
,
491 nv50
->vtxelt
->num_elements
* 4, 0);
493 switch (nr_components
) {
495 so_method(so
, tesla
, NV50TCL_VTX_ATTR_4F_X(attrib
), 4);
496 so_data (so
, fui(v
[0]));
497 so_data (so
, fui(v
[1]));
498 so_data (so
, fui(v
[2]));
499 so_data (so
, fui(v
[3]));
502 so_method(so
, tesla
, NV50TCL_VTX_ATTR_3F_X(attrib
), 3);
503 so_data (so
, fui(v
[0]));
504 so_data (so
, fui(v
[1]));
505 so_data (so
, fui(v
[2]));
508 so_method(so
, tesla
, NV50TCL_VTX_ATTR_2F_X(attrib
), 2);
509 so_data (so
, fui(v
[0]));
510 so_data (so
, fui(v
[1]));
513 if (attrib
== nv50
->vertprog
->cfg
.edgeflag_in
) {
514 so_method(so
, tesla
, NV50TCL_EDGEFLAG_ENABLE
, 1);
515 so_data (so
, v
[0] ? 1 : 0);
517 so_method(so
, tesla
, NV50TCL_VTX_ATTR_1F(attrib
), 1);
518 so_data (so
, fui(v
[0]));
521 nouveau_bo_unmap(bo
);
525 nouveau_bo_unmap(bo
);
530 nv50_vtxelt_construct(struct nv50_vtxelt_stateobj
*cso
)
534 for (i
= 0; i
< cso
->num_elements
; ++i
) {
535 struct pipe_vertex_element
*ve
= &cso
->pipe
[i
];
537 cso
->hw
[i
] = nv50_vbo_vtxelt_to_hw(ve
);
541 struct nouveau_stateobj
*
542 nv50_vbo_validate(struct nv50_context
*nv50
)
544 struct nouveau_grobj
*tesla
= nv50
->screen
->tesla
;
545 struct nouveau_stateobj
*vtxbuf
, *vtxfmt
, *vtxattr
;
548 /* don't validate if Gallium took away our buffers */
549 if (nv50
->vtxbuf_nr
== 0)
553 if (nv50
->screen
->force_push
||
554 nv50
->vertprog
->cfg
.edgeflag_in
< 16)
555 nv50
->vbo_fifo
= 0xffff;
557 for (i
= 0; i
< nv50
->vtxbuf_nr
; i
++) {
558 if (nv50
->vtxbuf
[i
].stride
&&
559 !(nv50
->vtxbuf
[i
].buffer
->usage
& PIPE_BUFFER_USAGE_VERTEX
))
560 nv50
->vbo_fifo
= 0xffff;
563 n_ve
= MAX2(nv50
->vtxelt
->num_elements
, nv50
->state
.vtxelt_nr
);
566 vtxbuf
= so_new(n_ve
* 2, n_ve
* 5, nv50
->vtxelt
->num_elements
* 4);
567 vtxfmt
= so_new(1, n_ve
, 0);
568 so_method(vtxfmt
, tesla
, NV50TCL_VERTEX_ARRAY_ATTRIB(0), n_ve
);
570 for (i
= 0; i
< nv50
->vtxelt
->num_elements
; i
++) {
571 struct pipe_vertex_element
*ve
= &nv50
->vtxelt
->pipe
[i
];
572 struct pipe_vertex_buffer
*vb
=
573 &nv50
->vtxbuf
[ve
->vertex_buffer_index
];
574 struct nouveau_bo
*bo
= nouveau_bo(vb
->buffer
);
575 uint32_t hw
= nv50
->vtxelt
->hw
[i
];
578 nv50_vbo_static_attrib(nv50
, i
, &vtxattr
, ve
, vb
)) {
579 so_data(vtxfmt
, hw
| (1 << 4));
581 so_method(vtxbuf
, tesla
,
582 NV50TCL_VERTEX_ARRAY_FORMAT(i
), 1);
585 nv50
->vbo_fifo
&= ~(1 << i
);
589 if (nv50
->vbo_fifo
) {
590 so_data (vtxfmt
, hw
| (ve
->instance_divisor
? (1 << 4) : i
));
591 so_method(vtxbuf
, tesla
,
592 NV50TCL_VERTEX_ARRAY_FORMAT(i
), 1);
597 so_data(vtxfmt
, hw
| i
);
599 so_method(vtxbuf
, tesla
, NV50TCL_VERTEX_ARRAY_FORMAT(i
), 3);
600 so_data (vtxbuf
, 0x20000000 |
601 (ve
->instance_divisor
? 0 : vb
->stride
));
602 so_reloc (vtxbuf
, bo
, vb
->buffer_offset
+
603 ve
->src_offset
, NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
|
604 NOUVEAU_BO_RD
| NOUVEAU_BO_HIGH
, 0, 0);
605 so_reloc (vtxbuf
, bo
, vb
->buffer_offset
+
606 ve
->src_offset
, NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
|
607 NOUVEAU_BO_RD
| NOUVEAU_BO_LOW
, 0, 0);
609 /* vertex array limits */
610 so_method(vtxbuf
, tesla
, NV50TCL_VERTEX_ARRAY_LIMIT_HIGH(i
), 2);
611 so_reloc (vtxbuf
, bo
, vb
->buffer
->size
- 1,
612 NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
| NOUVEAU_BO_RD
|
613 NOUVEAU_BO_HIGH
, 0, 0);
614 so_reloc (vtxbuf
, bo
, vb
->buffer
->size
- 1,
615 NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
| NOUVEAU_BO_RD
|
616 NOUVEAU_BO_LOW
, 0, 0);
618 for (; i
< n_ve
; ++i
) {
619 so_data (vtxfmt
, 0x7e080010);
621 so_method(vtxbuf
, tesla
, NV50TCL_VERTEX_ARRAY_FORMAT(i
), 1);
624 nv50
->state
.vtxelt_nr
= nv50
->vtxelt
->num_elements
;
626 so_ref (vtxbuf
, &nv50
->state
.vtxbuf
);
627 so_ref (vtxattr
, &nv50
->state
.vtxattr
);
628 so_ref (NULL
, &vtxbuf
);
629 so_ref (NULL
, &vtxattr
);