1 /**************************************************************************
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
/**
 * Build post-transformation, post-clipping vertex buffers and element
 * lists by hooking into the end of the primitive pipeline and
 * manipulating the vertex_id field in the vertex headers.
 *
 * XXX: work in progress
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 * \author Keith Whitwell <keith@tungstengraphics.com>
 */
41 #include "util/u_debug.h"
42 #include "pipe/p_inlines.h"
43 #include "pipe/internal/p_winsys_screen.h"
45 #include "nv20_context.h"
46 #include "nv20_state.h"
48 #include "draw/draw_vbuf.h"
51 * Primitive renderer for nv20.
53 struct nv20_vbuf_render
{
54 struct vbuf_render base
;
56 struct nv20_context
*nv20
;
58 /** Vertex buffer in VRAM */
59 struct pipe_buffer
*pbuffer
;
61 /** Vertex buffer in normal memory */
64 /** Vertex size in bytes */
65 /*unsigned vertex_size;*/
67 /** Hardware primitive */
72 * Basically a cast wrapper.
74 static INLINE
struct nv20_vbuf_render
*
75 nv20_vbuf_render(struct vbuf_render
*render
)
78 return (struct nv20_vbuf_render
*)render
;
81 void nv20_vtxbuf_bind( struct nv20_context
* nv20
)
85 for(i
= 0; i
< NV20TCL_VTXBUF_ADDRESS__SIZE
; i
++) {
86 BEGIN_RING(kelvin
, NV20TCL_VTXBUF_ADDRESS(i
), 1);
87 OUT_RING(0/*nv20->vtxbuf*/);
88 BEGIN_RING(kelvin
, NV20TCL_VTXFMT(i
) ,1);
94 static const struct vertex_info
*
95 nv20_vbuf_render_get_vertex_info( struct vbuf_render
*render
)
97 struct nv20_vbuf_render
*nv20_render
= nv20_vbuf_render(render
);
98 struct nv20_context
*nv20
= nv20_render
->nv20
;
100 nv20_emit_hw_state(nv20
);
102 return &nv20
->vertex_info
;
106 nv20__allocate_mbuffer(struct nv20_vbuf_render
*nv20_render
, size_t size
)
108 nv20_render
->mbuffer
= MALLOC(size
);
109 return nv20_render
->mbuffer
;
113 nv20__allocate_pbuffer(struct nv20_vbuf_render
*nv20_render
, size_t size
)
115 struct pipe_screen
*screen
= nv20_render
->nv20
->pipe
.screen
;
116 nv20_render
->pbuffer
= screen
->buffer_create(screen
, 64,
117 PIPE_BUFFER_USAGE_VERTEX
, size
);
121 nv20_vbuf_render_allocate_vertices( struct vbuf_render
*render
,
125 struct nv20_vbuf_render
*nv20_render
= nv20_vbuf_render(render
);
126 size_t size
= (size_t)vertex_size
* (size_t)nr_vertices
;
129 assert(!nv20_render
->pbuffer
);
130 assert(!nv20_render
->mbuffer
);
133 * For small amount of vertices, don't bother with pipe vertex
134 * buffer, the data will be passed directly via the fifo.
136 /* XXX: Pipe vertex buffers don't work. */
137 if (0 && size
> 16 * 1024) {
138 nv20__allocate_pbuffer(nv20_render
, size
);
139 /* umm yeah so this is ugly */
140 buf
= nv20_render
->pbuffer
;
142 buf
= nv20__allocate_mbuffer(nv20_render
, size
);
146 nv20_render
->nv20
->dirty
|= NV20_NEW_VTXARRAYS
;
148 return buf
? TRUE
: FALSE
;
152 nv20_vbuf_render_map_vertices( struct vbuf_render
*render
)
154 struct nv20_vbuf_render
*nv20_render
= nv20_vbuf_render(render
);
155 struct pipe_winsys
*winsys
= nv20_render
->nv20
->pipe
.winsys
;
157 if (nv20_render
->pbuffer
) {
158 return winsys
->buffer_map(winsys
,
159 nv20_render
->pbuffer
,
160 PIPE_BUFFER_USAGE_CPU_WRITE
);
161 } else if (nv20_render
->mbuffer
) {
162 return nv20_render
->mbuffer
;
166 /* warnings be gone */
171 nv20_vbuf_render_unmap_vertices( struct vbuf_render
*render
,
175 struct nv20_vbuf_render
*nv20_render
= nv20_vbuf_render(render
);
176 struct pipe_winsys
*winsys
= nv20_render
->nv20
->pipe
.winsys
;
178 if (nv20_render
->pbuffer
)
179 winsys
->buffer_unmap(winsys
, nv20_render
->pbuffer
);
183 nv20_vbuf_render_set_primitive( struct vbuf_render
*render
,
186 struct nv20_vbuf_render
*nv20_render
= nv20_vbuf_render(render
);
187 unsigned hwp
= nvgl_primitive(prim
);
191 nv20_render
->hwprim
= hwp
;
196 nv20__vtxhwformat(unsigned stride
, unsigned fields
, unsigned type
)
198 return (stride
<< NV20TCL_VTXFMT_STRIDE_SHIFT
) |
199 (fields
<< NV20TCL_VTXFMT_SIZE_SHIFT
) |
200 (type
<< NV20TCL_VTXFMT_TYPE_SHIFT
);
204 nv20__emit_format(struct nv20_context
*nv20
, enum attrib_emit type
, int hwattr
)
211 hwfmt
= nv20__vtxhwformat(0, 0, 2);
215 hwfmt
= nv20__vtxhwformat(4, 1, 2);
219 hwfmt
= nv20__vtxhwformat(8, 2, 2);
223 hwfmt
= nv20__vtxhwformat(12, 3, 2);
227 hwfmt
= nv20__vtxhwformat(16, 4, 2);
231 NOUVEAU_ERR("unhandled attrib_emit %d\n", type
);
235 BEGIN_RING(kelvin
, NV20TCL_VTXFMT(hwattr
), 1);
241 nv20__emit_vertex_array_format(struct nv20_context
*nv20
)
243 struct vertex_info
*vinfo
= &nv20
->vertex_info
;
244 int hwattr
= NV20TCL_VTXFMT__SIZE
;
246 unsigned nr_fields
= 0;
248 while (hwattr
-- > 0) {
249 if (vinfo
->hwfmt
[0] & (1 << hwattr
)) {
250 nr_fields
+= nv20__emit_format(nv20
,
251 vinfo
->attrib
[attr
].emit
, hwattr
);
254 nv20__emit_format(nv20
, EMIT_OMIT
, hwattr
);
261 nv20__draw_mbuffer(struct nv20_vbuf_render
*nv20_render
,
262 const ushort
*indices
,
265 struct nv20_context
*nv20
= nv20_render
->nv20
;
266 struct vertex_info
*vinfo
= &nv20
->vertex_info
;
269 ubyte
*data
= nv20_render
->mbuffer
;
270 int vsz
= 4 * vinfo
->size
;
272 nr_fields
= nv20__emit_vertex_array_format(nv20
);
274 BEGIN_RING(kelvin
, NV20TCL_VERTEX_BEGIN_END
, 1);
275 OUT_RING(nv20_render
->hwprim
);
277 max_push
= 1200 / nr_fields
;
280 int push
= MIN2(nr_indices
, max_push
);
282 BEGIN_RING_NI(kelvin
, NV20TCL_VERTEX_DATA
, push
* nr_fields
);
283 for (i
= 0; i
< push
; i
++) {
284 /* XXX: fixme to handle other than floats? */
286 float *attrv
= (float*)&data
[indices
[i
] * vsz
];
295 BEGIN_RING(kelvin
, NV20TCL_VERTEX_BEGIN_END
, 1);
296 OUT_RING(NV20TCL_VERTEX_BEGIN_END_STOP
);
300 nv20__draw_pbuffer(struct nv20_vbuf_render
*nv20_render
,
301 const ushort
*indices
,
304 struct nv20_context
*nv20
= nv20_render
->nv20
;
307 NOUVEAU_ERR("nv20__draw_pbuffer: this path is broken.\n");
309 BEGIN_RING(kelvin
, NV10TCL_VERTEX_ARRAY_OFFSET_POS
, 1);
310 OUT_RELOCl(nv20_render
->pbuffer
, 0,
311 NOUVEAU_BO_VRAM
| NOUVEAU_BO_GART
| NOUVEAU_BO_RD
);
313 BEGIN_RING(kelvin
, NV10TCL_VERTEX_BUFFER_BEGIN_END
, 1);
314 OUT_RING(nv20_render
->hwprim
);
316 if (nr_indices
& 1) {
317 BEGIN_RING(kelvin
, NV10TCL_VB_ELEMENT_U32
, 1);
318 OUT_RING (indices
[0]);
319 indices
++; nr_indices
--;
323 // XXX too big/small ? check the size
324 push
= MIN2(nr_indices
, 1200 * 2);
326 BEGIN_RING_NI(kelvin
, NV10TCL_VB_ELEMENT_U16
, push
>> 1);
327 for (i
= 0; i
< push
; i
+=2)
328 OUT_RING((indices
[i
+1] << 16) | indices
[i
]);
334 BEGIN_RING(kelvin
, NV10TCL_VERTEX_BUFFER_BEGIN_END
, 1);
339 nv20_vbuf_render_draw( struct vbuf_render
*render
,
340 const ushort
*indices
,
343 struct nv20_vbuf_render
*nv20_render
= nv20_vbuf_render(render
);
345 nv20_emit_hw_state(nv20_render
->nv20
);
347 if (nv20_render
->pbuffer
)
348 nv20__draw_pbuffer(nv20_render
, indices
, nr_indices
);
349 else if (nv20_render
->mbuffer
)
350 nv20__draw_mbuffer(nv20_render
, indices
, nr_indices
);
357 nv20_vbuf_render_release_vertices( struct vbuf_render
*render
)
359 struct nv20_vbuf_render
*nv20_render
= nv20_vbuf_render(render
);
360 struct nv20_context
*nv20
= nv20_render
->nv20
;
361 struct pipe_screen
*pscreen
= &nv20
->screen
->pipe
;
363 if (nv20_render
->pbuffer
) {
364 pipe_buffer_reference(&nv20_render
->pbuffer
, NULL
);
365 } else if (nv20_render
->mbuffer
) {
366 FREE(nv20_render
->mbuffer
);
367 nv20_render
->mbuffer
= NULL
;
374 nv20_vbuf_render_destroy( struct vbuf_render
*render
)
376 struct nv20_vbuf_render
*nv20_render
= nv20_vbuf_render(render
);
378 assert(!nv20_render
->pbuffer
);
379 assert(!nv20_render
->mbuffer
);
386 * Create a new primitive render.
388 static struct vbuf_render
*
389 nv20_vbuf_render_create( struct nv20_context
*nv20
)
391 struct nv20_vbuf_render
*nv20_render
= CALLOC_STRUCT(nv20_vbuf_render
);
393 nv20_render
->nv20
= nv20
;
395 nv20_render
->base
.max_vertex_buffer_bytes
= 16*1024;
396 nv20_render
->base
.max_indices
= 1024;
397 nv20_render
->base
.get_vertex_info
= nv20_vbuf_render_get_vertex_info
;
398 nv20_render
->base
.allocate_vertices
=
399 nv20_vbuf_render_allocate_vertices
;
400 nv20_render
->base
.map_vertices
= nv20_vbuf_render_map_vertices
;
401 nv20_render
->base
.unmap_vertices
= nv20_vbuf_render_unmap_vertices
;
402 nv20_render
->base
.set_primitive
= nv20_vbuf_render_set_primitive
;
403 nv20_render
->base
.draw
= nv20_vbuf_render_draw
;
404 nv20_render
->base
.release_vertices
= nv20_vbuf_render_release_vertices
;
405 nv20_render
->base
.destroy
= nv20_vbuf_render_destroy
;
407 return &nv20_render
->base
;
412 * Create a new primitive vbuf/render stage.
414 struct draw_stage
*nv20_draw_vbuf_stage( struct nv20_context
*nv20
)
416 struct vbuf_render
*render
;
417 struct draw_stage
*stage
;
419 render
= nv20_vbuf_render_create(nv20
);
423 stage
= draw_vbuf_stage( nv20
->draw
, render
);
425 render
->destroy(render
);