2 * Copyright (C) 2009-2010 Francisco Jerez.
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 #include "nouveau_bufferobj.h"
28 #include "nouveau_util.h"
30 #include "main/bufferobj.h"
31 #include "main/image.h"
33 /* Arbitrary pushbuf length we can assume we can get with a single
 * call to WAIT_RING. */
35 #define PUSHBUF_DWORDS 65536
37 /* Functions to set up struct nouveau_array_state from something like
38 * a GL array or index buffer. */
41 vbo_init_array(struct nouveau_array_state
*a
, int attr
, int stride
,
42 int fields
, int type
, struct gl_buffer_object
*obj
,
43 const void *ptr
, GLboolean map
)
50 if (_mesa_is_bufferobj(obj
)) {
51 nouveau_bo_ref(to_nouveau_bufferobj(obj
)->bo
, &a
->bo
);
52 a
->offset
= (intptr_t)ptr
;
55 nouveau_bo_map(a
->bo
, NOUVEAU_BO_RD
);
56 a
->buf
= a
->bo
->map
+ a
->offset
;
62 nouveau_bo_ref(NULL
, &a
->bo
);
72 get_array_extract(a
, &a
->extract_u
, &a
->extract_f
);
76 vbo_deinit_array(struct nouveau_array_state
*a
)
80 nouveau_bo_unmap(a
->bo
);
81 nouveau_bo_ref(NULL
, &a
->bo
);
89 get_array_stride(struct gl_context
*ctx
, const struct gl_client_array
*a
)
91 struct nouveau_render_state
*render
= to_render_state(ctx
);
93 if (render
->mode
== VBO
&& !_mesa_is_bufferobj(a
->BufferObj
))
94 /* Pack client buffers. */
95 return align(_mesa_sizeof_type(a
->Type
) * a
->Size
, 4);
101 vbo_init_arrays(struct gl_context
*ctx
, const struct _mesa_index_buffer
*ib
,
102 const struct gl_client_array
**arrays
)
104 struct nouveau_render_state
*render
= to_render_state(ctx
);
108 vbo_init_array(&render
->ib
, 0, 0, ib
->count
, ib
->type
,
109 ib
->obj
, ib
->ptr
, GL_TRUE
);
111 for (i
= 0; i
< render
->attr_count
; i
++) {
112 int attr
= render
->map
[i
];
115 const struct gl_client_array
*array
= arrays
[attr
];
117 vbo_init_array(&render
->attrs
[attr
], attr
,
118 get_array_stride(ctx
, array
),
119 array
->Size
, array
->Type
,
120 array
->BufferObj
, array
->Ptr
,
121 render
->mode
== IMM
);
127 vbo_deinit_arrays(struct gl_context
*ctx
, const struct _mesa_index_buffer
*ib
,
128 const struct gl_client_array
**arrays
)
130 struct nouveau_render_state
*render
= to_render_state(ctx
);
134 vbo_deinit_array(&render
->ib
);
136 for (i
= 0; i
< render
->attr_count
; i
++) {
137 int *attr
= &render
->map
[i
];
140 vbo_deinit_array(&render
->attrs
[*attr
]);
145 render
->attr_count
= 0;
146 context_bctx(ctx
, VERTEX
);
149 /* Make some rendering decisions from the GL context. */
152 vbo_choose_render_mode(struct gl_context
*ctx
, const struct gl_client_array
**arrays
)
154 struct nouveau_render_state
*render
= to_render_state(ctx
);
159 if (ctx
->Light
.Enabled
) {
160 for (i
= 0; i
< MAT_ATTRIB_MAX
; i
++) {
161 if (arrays
[VERT_ATTRIB_GENERIC0
+ i
]->StrideB
) {
168 if (render
->mode
== VBO
)
169 render
->attr_count
= NUM_VERTEX_ATTRS
;
171 render
->attr_count
= 0;
175 vbo_emit_attr(struct gl_context
*ctx
, const struct gl_client_array
**arrays
, int attr
)
177 struct nouveau_channel
*chan
= context_chan(ctx
);
178 struct nouveau_render_state
*render
= to_render_state(ctx
);
179 const struct gl_client_array
*array
= arrays
[attr
];
180 struct nouveau_array_state
*a
= &render
->attrs
[attr
];
183 if (!array
->StrideB
) {
184 if (attr
>= VERT_ATTRIB_GENERIC0
)
185 /* nouveau_update_state takes care of materials. */
188 /* Constant attribute. */
189 vbo_init_array(a
, attr
, array
->StrideB
, array
->Size
,
190 array
->Type
, array
->BufferObj
, array
->Ptr
,
196 /* Varying attribute. */
197 struct nouveau_attr_info
*info
= &TAG(vertex_attrs
)[attr
];
199 if (render
->mode
== VBO
) {
200 render
->map
[info
->vbo_index
] = attr
;
201 render
->vertex_size
+= array
->_ElementSize
;
203 render
->map
[render
->attr_count
++] = attr
;
204 render
->vertex_size
+= 4 * info
->imm_fields
;
209 #define MAT(a) (VERT_ATTRIB_GENERIC0 + MAT_ATTRIB_##a)
212 vbo_choose_attrs(struct gl_context
*ctx
, const struct gl_client_array
**arrays
)
214 struct nouveau_render_state
*render
= to_render_state(ctx
);
217 /* Reset the vertex size. */
218 render
->vertex_size
= 0;
220 vbo_emit_attr(ctx
, arrays
, VERT_ATTRIB_COLOR0
);
221 if (ctx
->Fog
.ColorSumEnabled
&& !ctx
->Light
.Enabled
)
222 vbo_emit_attr(ctx
, arrays
, VERT_ATTRIB_COLOR1
);
224 for (i
= 0; i
< ctx
->Const
.MaxTextureCoordUnits
; i
++) {
225 if (ctx
->Texture
._EnabledCoordUnits
& (1 << i
))
226 vbo_emit_attr(ctx
, arrays
, VERT_ATTRIB_TEX0
+ i
);
229 if (ctx
->Fog
.Enabled
&& ctx
->Fog
.FogCoordinateSource
== GL_FOG_COORD
)
230 vbo_emit_attr(ctx
, arrays
, VERT_ATTRIB_FOG
);
232 if (ctx
->Light
.Enabled
||
233 (ctx
->Texture
._GenFlags
& TEXGEN_NEED_NORMALS
))
234 vbo_emit_attr(ctx
, arrays
, VERT_ATTRIB_NORMAL
);
236 if (ctx
->Light
.Enabled
) {
237 vbo_emit_attr(ctx
, arrays
, MAT(FRONT_AMBIENT
));
238 vbo_emit_attr(ctx
, arrays
, MAT(FRONT_DIFFUSE
));
239 vbo_emit_attr(ctx
, arrays
, MAT(FRONT_SPECULAR
));
240 vbo_emit_attr(ctx
, arrays
, MAT(FRONT_SHININESS
));
242 if (ctx
->Light
.Model
.TwoSide
) {
243 vbo_emit_attr(ctx
, arrays
, MAT(BACK_AMBIENT
));
244 vbo_emit_attr(ctx
, arrays
, MAT(BACK_DIFFUSE
));
245 vbo_emit_attr(ctx
, arrays
, MAT(BACK_SPECULAR
));
246 vbo_emit_attr(ctx
, arrays
, MAT(BACK_SHININESS
));
250 vbo_emit_attr(ctx
, arrays
, VERT_ATTRIB_POS
);
254 get_max_client_stride(struct gl_context
*ctx
, const struct gl_client_array
**arrays
)
256 struct nouveau_render_state
*render
= to_render_state(ctx
);
259 for (i
= 0; i
< render
->attr_count
; i
++) {
260 int attr
= render
->map
[i
];
263 const struct gl_client_array
*a
= arrays
[attr
];
265 if (!_mesa_is_bufferobj(a
->BufferObj
))
266 s
= MAX2(s
, get_array_stride(ctx
, a
));
274 TAG(vbo_render_prims
)(struct gl_context
*ctx
, const struct gl_client_array
**arrays
,
275 const struct _mesa_prim
*prims
, GLuint nr_prims
,
276 const struct _mesa_index_buffer
*ib
,
277 GLboolean index_bounds_valid
,
278 GLuint min_index
, GLuint max_index
);
281 vbo_maybe_split(struct gl_context
*ctx
, const struct gl_client_array
**arrays
,
282 const struct _mesa_prim
*prims
, GLuint nr_prims
,
283 const struct _mesa_index_buffer
*ib
,
284 GLuint min_index
, GLuint max_index
)
286 struct nouveau_context
*nctx
= to_nouveau_context(ctx
);
287 struct nouveau_render_state
*render
= to_render_state(ctx
);
288 unsigned pushbuf_avail
= PUSHBUF_DWORDS
- 2 * (nctx
->bo
.count
+
290 vert_avail
= get_max_vertices(ctx
, NULL
, pushbuf_avail
),
291 idx_avail
= get_max_vertices(ctx
, ib
, pushbuf_avail
);
294 /* Try to keep client buffers smaller than the scratch BOs. */
295 if (render
->mode
== VBO
&&
296 (stride
= get_max_client_stride(ctx
, arrays
)))
297 vert_avail
= MIN2(vert_avail
,
298 NOUVEAU_SCRATCH_SIZE
/ stride
);
300 if (max_index
- min_index
> vert_avail
||
301 (ib
&& ib
->count
> idx_avail
)) {
302 struct split_limits limits
= {
303 .max_verts
= vert_avail
,
304 .max_indices
= idx_avail
,
308 vbo_split_prims(ctx
, arrays
, prims
, nr_prims
, ib
, min_index
,
309 max_index
, TAG(vbo_render_prims
), &limits
);
316 /* VBO rendering path. */
319 vbo_bind_vertices(struct gl_context
*ctx
, const struct gl_client_array
**arrays
,
320 GLint basevertex
, GLuint min_index
, GLuint max_index
)
322 struct nouveau_render_state
*render
= to_render_state(ctx
);
325 for (i
= 0; i
< NUM_VERTEX_ATTRS
; i
++) {
326 int attr
= render
->map
[i
];
329 const struct gl_client_array
*array
= arrays
[attr
];
330 struct nouveau_array_state
*a
= &render
->attrs
[attr
];
331 unsigned delta
= (basevertex
+ min_index
)
335 /* Array in a buffer obj. */
336 a
->offset
= (intptr_t)array
->Ptr
+ delta
;
338 int j
, n
= max_index
- min_index
+ 1;
339 char *sp
= (char *)array
->Ptr
+ delta
;
340 char *dp
= nouveau_get_scratch(
341 ctx
, n
* a
->stride
, &a
->bo
, &a
->offset
);
343 /* Array in client memory, move it to
344 * a scratch buffer obj. */
345 for (j
= 0; j
< n
; j
++)
346 memcpy(dp
+ j
* a
->stride
,
347 sp
+ j
* array
->StrideB
,
353 TAG(render_bind_vertices
)(ctx
);
357 vbo_draw_vbo(struct gl_context
*ctx
, const struct gl_client_array
**arrays
,
358 const struct _mesa_prim
*prims
, GLuint nr_prims
,
359 const struct _mesa_index_buffer
*ib
, GLuint min_index
,
362 struct nouveau_channel
*chan
= context_chan(ctx
);
364 int delta
= -min_index
, basevertex
= 0, i
;
367 get_array_dispatch(&to_render_state(ctx
)->ib
, &dispatch
);
369 TAG(render_set_format
)(ctx
);
371 for (i
= 0; i
< nr_prims
; i
++) {
372 unsigned start
= prims
[i
].start
,
373 count
= prims
[i
].count
;
375 if (i
== 0 || basevertex
!= prims
[i
].basevertex
) {
376 basevertex
= prims
[i
].basevertex
;
377 vbo_bind_vertices(ctx
, arrays
, basevertex
,
378 min_index
, max_index
);
381 if (count
> get_max_vertices(ctx
, ib
, AVAIL_RING(chan
)))
382 WAIT_RING(chan
, PUSHBUF_DWORDS
);
384 BATCH_BEGIN(nvgl_primitive(prims
[i
].mode
));
385 dispatch(ctx
, start
, delta
, count
);
390 /* Immediate rendering path. */
393 extract_id(struct nouveau_array_state
*a
, int i
, int j
)
399 vbo_draw_imm(struct gl_context
*ctx
, const struct gl_client_array
**arrays
,
400 const struct _mesa_prim
*prims
, GLuint nr_prims
,
401 const struct _mesa_index_buffer
*ib
, GLuint min_index
,
404 struct nouveau_render_state
*render
= to_render_state(ctx
);
405 struct nouveau_channel
*chan
= context_chan(ctx
);
406 extract_u_t extract
= ib
? render
->ib
.extract_u
: extract_id
;
410 for (i
= 0; i
< nr_prims
; i
++) {
411 unsigned start
= prims
[i
].start
,
412 end
= start
+ prims
[i
].count
;
414 if (prims
[i
].count
> get_max_vertices(ctx
, ib
,
416 WAIT_RING(chan
, PUSHBUF_DWORDS
);
418 BATCH_BEGIN(nvgl_primitive(prims
[i
].mode
));
420 for (; start
< end
; start
++) {
421 j
= prims
[i
].basevertex
+
422 extract(&render
->ib
, 0, start
);
424 for (k
= 0; k
< render
->attr_count
; k
++)
425 EMIT_IMM(ctx
, &render
->attrs
[render
->map
[k
]],
433 /* draw_prims entry point when we're doing hw-tnl. */
436 TAG(vbo_render_prims
)(struct gl_context
*ctx
, const struct gl_client_array
**arrays
,
437 const struct _mesa_prim
*prims
, GLuint nr_prims
,
438 const struct _mesa_index_buffer
*ib
,
439 GLboolean index_bounds_valid
,
440 GLuint min_index
, GLuint max_index
)
442 struct nouveau_render_state
*render
= to_render_state(ctx
);
444 if (!index_bounds_valid
)
445 vbo_get_minmax_index(ctx
, prims
, ib
, &min_index
, &max_index
);
447 vbo_choose_render_mode(ctx
, arrays
);
448 vbo_choose_attrs(ctx
, arrays
);
450 if (vbo_maybe_split(ctx
, arrays
, prims
, nr_prims
, ib
, min_index
,
454 vbo_init_arrays(ctx
, ib
, arrays
);
456 if (render
->mode
== VBO
)
457 vbo_draw_vbo(ctx
, arrays
, prims
, nr_prims
, ib
, min_index
,
460 vbo_draw_imm(ctx
, arrays
, prims
, nr_prims
, ib
, min_index
,
463 vbo_deinit_arrays(ctx
, ib
, arrays
);