1 /**************************************************************************
3 Copyright 2002-2008 VMware, Inc.
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 on the rights to use, copy, modify, merge, publish, distribute, sub
11 license, and/or sell copies of the Software, and to permit persons to whom
12 the Software is furnished to do so, subject to the following conditions:
14 The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 * Keith Whitwell <keithw@vmware.com>
33 #include "main/glheader.h"
34 #include "main/bufferobj.h"
35 #include "main/context.h"
36 #include "main/macros.h"
37 #include "main/vtxfmt.h"
38 #include "main/dlist.h"
39 #include "main/eval.h"
40 #include "main/state.h"
41 #include "main/light.h"
42 #include "main/api_arrayelt.h"
43 #include "main/draw_validate.h"
44 #include "main/dispatch.h"
45 #include "util/bitscan.h"
46 #include "util/u_memory.h"
49 #include "vbo_private.h"
/** Buffer-object ID/name reserved for the immediate-mode VBO. */
#define IMM_BUFFER_NAME 0xaabbccdd
/* Forward declaration; defined near the bottom of this file. */
static void
vbo_reset_all_attr(struct vbo_exec_context *exec);
61 * Close off the last primitive, execute the buffer, restart the
62 * primitive. This is called when we fill a vertex buffer before
66 vbo_exec_wrap_buffers(struct vbo_exec_context
*exec
)
68 if (exec
->vtx
.prim_count
== 0) {
69 exec
->vtx
.copied
.nr
= 0;
70 exec
->vtx
.vert_count
= 0;
71 exec
->vtx
.buffer_ptr
= exec
->vtx
.buffer_map
;
74 struct _mesa_prim
*last_prim
= &exec
->vtx
.prim
[exec
->vtx
.prim_count
- 1];
75 const GLuint last_begin
= last_prim
->begin
;
78 if (_mesa_inside_begin_end(exec
->ctx
)) {
79 last_prim
->count
= exec
->vtx
.vert_count
- last_prim
->start
;
82 last_count
= last_prim
->count
;
84 /* Special handling for wrapping GL_LINE_LOOP */
85 if (last_prim
->mode
== GL_LINE_LOOP
&&
88 /* draw this section of the incomplete line loop as a line strip */
89 last_prim
->mode
= GL_LINE_STRIP
;
90 if (!last_prim
->begin
) {
91 /* This is not the first section of the line loop, so don't
92 * draw the 0th vertex. We're saving it until we draw the
93 * very last section of the loop.
100 /* Execute the buffer and save copied vertices.
102 if (exec
->vtx
.vert_count
)
103 vbo_exec_vtx_flush(exec
);
105 exec
->vtx
.prim_count
= 0;
106 exec
->vtx
.copied
.nr
= 0;
109 /* Emit a glBegin to start the new list.
111 assert(exec
->vtx
.prim_count
== 0);
113 if (_mesa_inside_begin_end(exec
->ctx
)) {
114 exec
->vtx
.prim
[0].mode
= exec
->ctx
->Driver
.CurrentExecPrimitive
;
115 exec
->vtx
.prim
[0].begin
= 0;
116 exec
->vtx
.prim
[0].end
= 0;
117 exec
->vtx
.prim
[0].start
= 0;
118 exec
->vtx
.prim
[0].count
= 0;
119 exec
->vtx
.prim_count
++;
121 if (exec
->vtx
.copied
.nr
== last_count
)
122 exec
->vtx
.prim
[0].begin
= last_begin
;
129 * Deal with buffer wrapping where provoked by the vertex buffer
130 * filling up, as opposed to upgrade_vertex().
133 vbo_exec_vtx_wrap(struct vbo_exec_context
*exec
)
135 unsigned numComponents
;
137 /* Run pipeline on current vertices, copy wrapped vertices
138 * to exec->vtx.copied.
140 vbo_exec_wrap_buffers(exec
);
142 if (!exec
->vtx
.buffer_ptr
) {
143 /* probably ran out of memory earlier when allocating the VBO */
147 /* Copy stored stored vertices to start of new list.
149 assert(exec
->vtx
.max_vert
- exec
->vtx
.vert_count
> exec
->vtx
.copied
.nr
);
151 numComponents
= exec
->vtx
.copied
.nr
* exec
->vtx
.vertex_size
;
152 memcpy(exec
->vtx
.buffer_ptr
,
153 exec
->vtx
.copied
.buffer
,
154 numComponents
* sizeof(fi_type
));
155 exec
->vtx
.buffer_ptr
+= numComponents
;
156 exec
->vtx
.vert_count
+= exec
->vtx
.copied
.nr
;
158 exec
->vtx
.copied
.nr
= 0;
163 * Copy the active vertex's values to the ctx->Current fields.
166 vbo_exec_copy_to_current(struct vbo_exec_context
*exec
)
168 struct gl_context
*ctx
= exec
->ctx
;
169 struct vbo_context
*vbo
= vbo_context(ctx
);
170 GLbitfield64 enabled
= exec
->vtx
.enabled
& (~BITFIELD64_BIT(VBO_ATTRIB_POS
));
173 const int i
= u_bit_scan64(&enabled
);
175 /* Note: the exec->vtx.current[i] pointers point into the
176 * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
178 GLfloat
*current
= (GLfloat
*)vbo
->current
[i
].Ptr
;
179 fi_type tmp
[8]; /* space for doubles */
182 if (exec
->vtx
.attr
[i
].type
== GL_DOUBLE
||
183 exec
->vtx
.attr
[i
].type
== GL_UNSIGNED_INT64_ARB
)
186 assert(exec
->vtx
.attr
[i
].size
);
188 if (exec
->vtx
.attr
[i
].type
== GL_DOUBLE
||
189 exec
->vtx
.attr
[i
].type
== GL_UNSIGNED_INT64_ARB
) {
190 memset(tmp
, 0, sizeof(tmp
));
191 memcpy(tmp
, exec
->vtx
.attrptr
[i
], exec
->vtx
.attr
[i
].size
* sizeof(GLfloat
));
193 COPY_CLEAN_4V_TYPE_AS_UNION(tmp
,
194 exec
->vtx
.attr
[i
].size
,
195 exec
->vtx
.attrptr
[i
],
196 exec
->vtx
.attr
[i
].type
);
199 if (exec
->vtx
.attr
[i
].type
!= vbo
->current
[i
].Format
.Type
||
200 memcmp(current
, tmp
, 4 * sizeof(GLfloat
) * dmul
) != 0) {
201 memcpy(current
, tmp
, 4 * sizeof(GLfloat
) * dmul
);
203 /* Given that we explicitly state size here, there is no need
204 * for the COPY_CLEAN above, could just copy 16 bytes and be
205 * done. The only problem is when Mesa accesses ctx->Current
208 /* Size here is in components - not bytes */
209 vbo_set_vertex_format(&vbo
->current
[i
].Format
,
210 exec
->vtx
.attr
[i
].size
/ dmul
,
211 exec
->vtx
.attr
[i
].type
);
213 /* This triggers rather too much recalculation of Mesa state
214 * that doesn't get used (eg light positions).
216 if (i
>= VBO_ATTRIB_MAT_FRONT_AMBIENT
&&
217 i
<= VBO_ATTRIB_MAT_BACK_INDEXES
)
218 ctx
->NewState
|= _NEW_LIGHT
;
220 ctx
->NewState
|= _NEW_CURRENT_ATTRIB
;
224 /* Colormaterial -- this kindof sucks.
226 if (ctx
->Light
.ColorMaterialEnabled
&&
227 exec
->vtx
.attr
[VBO_ATTRIB_COLOR0
].size
) {
228 _mesa_update_color_material(ctx
,
229 ctx
->Current
.Attrib
[VBO_ATTRIB_COLOR0
]);
235 * Flush existing data, set new attrib size, replay copied vertices.
236 * This is called when we transition from a small vertex attribute size
237 * to a larger one. Ex: glTexCoord2f -> glTexCoord4f.
238 * We need to go back over the previous 2-component texcoords and insert
239 * zero and one values.
240 * \param attr VBO_ATTRIB_x vertex attribute value
243 vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context
*exec
,
244 GLuint attr
, GLuint newSize
, GLenum newType
)
246 struct gl_context
*ctx
= exec
->ctx
;
247 struct vbo_context
*vbo
= vbo_context(ctx
);
248 const GLint lastcount
= exec
->vtx
.vert_count
;
249 fi_type
*old_attrptr
[VBO_ATTRIB_MAX
];
250 const GLuint old_vtx_size_no_pos
= exec
->vtx
.vertex_size_no_pos
;
251 const GLuint old_vtx_size
= exec
->vtx
.vertex_size
; /* floats per vertex */
252 const GLuint oldSize
= exec
->vtx
.attr
[attr
].size
;
255 assert(attr
< VBO_ATTRIB_MAX
);
257 /* Run pipeline on current vertices, copy wrapped vertices
258 * to exec->vtx.copied.
260 vbo_exec_wrap_buffers(exec
);
262 if (unlikely(exec
->vtx
.copied
.nr
)) {
263 /* We're in the middle of a primitive, keep the old vertex
264 * format around to be able to translate the copied vertices to
267 memcpy(old_attrptr
, exec
->vtx
.attrptr
, sizeof(old_attrptr
));
270 /* Heuristic: Attempt to isolate attributes received outside
271 * begin/end so that they don't bloat the vertices.
273 if (!_mesa_inside_begin_end(ctx
) &&
274 !oldSize
&& lastcount
> 8 && exec
->vtx
.vertex_size
) {
275 vbo_exec_copy_to_current(exec
);
276 vbo_reset_all_attr(exec
);
281 exec
->vtx
.attr
[attr
].size
= newSize
;
282 exec
->vtx
.attr
[attr
].active_size
= newSize
;
283 exec
->vtx
.attr
[attr
].type
= newType
;
284 exec
->vtx
.vertex_size
+= newSize
- oldSize
;
285 exec
->vtx
.vertex_size_no_pos
= exec
->vtx
.vertex_size
- exec
->vtx
.attr
[0].size
;
286 exec
->vtx
.max_vert
= vbo_compute_max_verts(exec
);
287 exec
->vtx
.vert_count
= 0;
288 exec
->vtx
.buffer_ptr
= exec
->vtx
.buffer_map
;
289 exec
->vtx
.enabled
|= BITFIELD64_BIT(attr
);
292 if (unlikely(oldSize
)) {
293 unsigned offset
= exec
->vtx
.attrptr
[attr
] - exec
->vtx
.vertex
;
295 /* If there are attribs after the resized attrib... */
296 if (offset
+ oldSize
< old_vtx_size_no_pos
) {
297 int size_diff
= newSize
- oldSize
;
298 fi_type
*old_first
= exec
->vtx
.attrptr
[attr
] + oldSize
;
299 fi_type
*new_first
= exec
->vtx
.attrptr
[attr
] + newSize
;
300 fi_type
*old_last
= exec
->vtx
.vertex
+ old_vtx_size_no_pos
- 1;
301 fi_type
*new_last
= exec
->vtx
.vertex
+ exec
->vtx
.vertex_size_no_pos
- 1;
304 /* Decreasing the size: Copy from first to last to move
305 * elements to the left.
307 fi_type
*old_end
= old_last
+ 1;
308 fi_type
*old
= old_first
;
309 fi_type
*new = new_first
;
313 } while (old
!= old_end
);
315 /* Increasing the size: Copy from last to first to move
316 * elements to the right.
318 fi_type
*old_end
= old_first
- 1;
319 fi_type
*old
= old_last
;
320 fi_type
*new = new_last
;
324 } while (old
!= old_end
);
327 /* Update pointers to attribs, because we moved them. */
328 GLbitfield64 enabled
= exec
->vtx
.enabled
&
329 ~BITFIELD64_BIT(VBO_ATTRIB_POS
) &
330 ~BITFIELD64_BIT(attr
);
332 unsigned i
= u_bit_scan64(&enabled
);
334 if (exec
->vtx
.attrptr
[i
] > exec
->vtx
.attrptr
[attr
])
335 exec
->vtx
.attrptr
[i
] += size_diff
;
339 /* Just have to append the new attribute at the end */
340 exec
->vtx
.attrptr
[attr
] = exec
->vtx
.vertex
+
341 exec
->vtx
.vertex_size_no_pos
- newSize
;
345 /* The position is always last. */
346 exec
->vtx
.attrptr
[0] = exec
->vtx
.vertex
+ exec
->vtx
.vertex_size_no_pos
;
348 /* Replay stored vertices to translate them
349 * to new format here.
351 * -- No need to replay - just copy piecewise
353 if (unlikely(exec
->vtx
.copied
.nr
)) {
354 fi_type
*data
= exec
->vtx
.copied
.buffer
;
355 fi_type
*dest
= exec
->vtx
.buffer_ptr
;
357 assert(exec
->vtx
.buffer_ptr
== exec
->vtx
.buffer_map
);
359 for (i
= 0 ; i
< exec
->vtx
.copied
.nr
; i
++) {
360 GLbitfield64 enabled
= exec
->vtx
.enabled
;
362 const int j
= u_bit_scan64(&enabled
);
363 GLuint sz
= exec
->vtx
.attr
[j
].size
;
364 GLint old_offset
= old_attrptr
[j
] - exec
->vtx
.vertex
;
365 GLint new_offset
= exec
->vtx
.attrptr
[j
] - exec
->vtx
.vertex
;
372 COPY_CLEAN_4V_TYPE_AS_UNION(tmp
, oldSize
,
374 exec
->vtx
.attr
[j
].type
);
375 COPY_SZ_4V(dest
+ new_offset
, newSize
, tmp
);
377 fi_type
*current
= (fi_type
*)vbo
->current
[j
].Ptr
;
378 COPY_SZ_4V(dest
+ new_offset
, sz
, current
);
382 COPY_SZ_4V(dest
+ new_offset
, sz
, data
+ old_offset
);
386 data
+= old_vtx_size
;
387 dest
+= exec
->vtx
.vertex_size
;
390 exec
->vtx
.buffer_ptr
= dest
;
391 exec
->vtx
.vert_count
+= exec
->vtx
.copied
.nr
;
392 exec
->vtx
.copied
.nr
= 0;
398 * This is when a vertex attribute transitions to a different size.
399 * For example, we saw a bunch of glTexCoord2f() calls and now we got a
400 * glTexCoord4f() call. We promote the array from size=2 to size=4.
401 * \param newSize size of new vertex (number of 32-bit words).
402 * \param attr VBO_ATTRIB_x vertex attribute value
405 vbo_exec_fixup_vertex(struct gl_context
*ctx
, GLuint attr
,
406 GLuint newSize
, GLenum newType
)
408 struct vbo_exec_context
*exec
= &vbo_context(ctx
)->exec
;
410 assert(attr
< VBO_ATTRIB_MAX
);
412 if (newSize
> exec
->vtx
.attr
[attr
].size
||
413 newType
!= exec
->vtx
.attr
[attr
].type
) {
414 /* New size is larger. Need to flush existing vertices and get
415 * an enlarged vertex format.
417 vbo_exec_wrap_upgrade_vertex(exec
, attr
, newSize
, newType
);
419 else if (newSize
< exec
->vtx
.attr
[attr
].active_size
) {
422 vbo_get_default_vals_as_union(exec
->vtx
.attr
[attr
].type
);
424 /* New size is smaller - just need to fill in some
425 * zeros. Don't need to flush or wrap.
427 for (i
= newSize
; i
<= exec
->vtx
.attr
[attr
].size
; i
++)
428 exec
->vtx
.attrptr
[attr
][i
-1] = id
[i
-1];
430 exec
->vtx
.attr
[attr
].active_size
= newSize
;
436 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
437 * It depends on a few things, including whether we're inside or outside
441 is_vertex_position(const struct gl_context
*ctx
, GLuint index
)
443 return (index
== 0 &&
444 _mesa_attr_zero_aliases_vertex(ctx
) &&
445 _mesa_inside_begin_end(ctx
));
/* Write a 64-bit value into a 32-bit pointer by preserving endianness. */
#if UTIL_ARCH_LITTLE_ENDIAN
   #define SET_64BIT(dst32, u64) do { \
         *(dst32)++ = (u64); \
         *(dst32)++ = (uint64_t)(u64) >> 32; \
      } while (0)
#else
   #define SET_64BIT(dst32, u64) do { \
         *(dst32)++ = (uint64_t)(u64) >> 32; \
         *(dst32)++ = (u64); \
      } while (0)
#endif
/**
 * This macro is used to implement all the glVertex, glColor, glTexCoord,
 * glVertexAttrib, etc functions.
 * \param A  VBO_ATTRIB_x attribute index
 * \param N  attribute size (1..4)
 * \param T  type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
 * \param C  cast type (uint32_t or uint64_t)
 * \param V0, V1, v2, V3  attribute value
 *
 * NOTE(review): the if/else skeleton was reconstructed from a garbled
 * extraction — confirm against upstream Mesa vbo_exec_api.c.
 */
#define ATTR_UNION(A, N, T, C, V0, V1, V2, V3)                          \
do {                                                                    \
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;             \
   int sz = (sizeof(C) / sizeof(GLfloat));                              \
                                                                        \
   assert(sz == 1 || sz == 2);                                          \
                                                                        \
   /* store a copy of the attribute in exec except for glVertex */      \
   if ((A) != 0) {                                                      \
      /* Check if attribute size or type is changing. */                \
      if (unlikely(exec->vtx.attr[A].active_size != N * sz ||           \
                   exec->vtx.attr[A].type != T)) {                      \
         vbo_exec_fixup_vertex(ctx, A, N * sz, T);                      \
      }                                                                 \
                                                                        \
      C *dest = (C *)exec->vtx.attrptr[A];                              \
      if (N>0) dest[0] = V0;                                            \
      if (N>1) dest[1] = V1;                                            \
      if (N>2) dest[2] = V2;                                            \
      if (N>3) dest[3] = V3;                                            \
      assert(exec->vtx.attr[A].type == T);                              \
                                                                        \
      /* we now have accumulated a per-vertex attribute */              \
      ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT;                    \
   } else {                                                             \
      /* This is a glVertex call */                                     \
      int size = exec->vtx.attr[0].size;                                \
                                                                        \
      /* Check if attribute size or type is changing. */                \
      if (unlikely(size < N * sz ||                                     \
                   exec->vtx.attr[0].type != T)) {                      \
         vbo_exec_wrap_upgrade_vertex(exec, 0, N * sz, T);              \
      }                                                                 \
                                                                        \
      uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr;                 \
      uint32_t *src = (uint32_t *)exec->vtx.vertex;                     \
      unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos;       \
                                                                        \
      /* Copy over attributes from exec. */                             \
      for (unsigned i = 0; i < vertex_size_no_pos; i++)                 \
         *dst++ = *src++;                                               \
                                                                        \
      /* Store the position, which is always last and can have 32 or */ \
      /* 64 bits per channel. */                                        \
      if (sizeof(C) == 4) {                                             \
         if (N > 0) *dst++ = V0;                                        \
         if (N > 1) *dst++ = V1;                                        \
         if (N > 2) *dst++ = V2;                                        \
         if (N > 3) *dst++ = V3;                                        \
                                                                        \
         /* Pad out to the declared position size. */                   \
         if (unlikely(N < size)) {                                      \
            if (N < 2 && size >= 2) *dst++ = V1;                        \
            if (N < 3 && size >= 3) *dst++ = V2;                        \
            if (N < 4 && size >= 4) *dst++ = V3;                        \
         }                                                              \
      } else {                                                          \
         /* 64 bits: dst can be unaligned, so copy each 4-byte word */  \
         /* separately */                                               \
         if (N > 0) SET_64BIT(dst, V0);                                 \
         if (N > 1) SET_64BIT(dst, V1);                                 \
         if (N > 2) SET_64BIT(dst, V2);                                 \
         if (N > 3) SET_64BIT(dst, V3);                                 \
                                                                        \
         if (unlikely(N * 2 < size)) {                                  \
            if (N < 2 && size >= 4) SET_64BIT(dst, V1);                 \
            if (N < 3 && size >= 6) SET_64BIT(dst, V2);                 \
            if (N < 4 && size >= 8) SET_64BIT(dst, V3);                 \
         }                                                              \
      }                                                                 \
                                                                        \
      /* dst now points at the beginning of the next vertex */          \
      exec->vtx.buffer_ptr = (fi_type*)dst;                             \
                                                                        \
      /* Don't set FLUSH_UPDATE_CURRENT because */                      \
      /* Current.Attrib[VBO_ATTRIB_POS] is never used. */               \
                                                                        \
      if (unlikely(++exec->vtx.vert_count >= exec->vtx.max_vert))       \
         vbo_exec_vtx_wrap(exec);                                       \
   }                                                                    \
} while (0)
554 #define ERROR(err) _mesa_error(ctx, err, __func__)
555 #define TAG(x) vbo_exec_##x
557 #include "vbo_attrib_tmp.h"
562 * Execute a glMaterial call. Note that if GL_COLOR_MATERIAL is enabled,
563 * this may be a (partial) no-op.
565 static void GLAPIENTRY
566 vbo_exec_Materialfv(GLenum face
, GLenum pname
, const GLfloat
*params
)
568 GLbitfield updateMats
;
569 GET_CURRENT_CONTEXT(ctx
);
571 /* This function should be a no-op when it tries to update material
572 * attributes which are currently tracking glColor via glColorMaterial.
573 * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
574 * indicating which material attributes can actually be updated below.
576 if (ctx
->Light
.ColorMaterialEnabled
) {
577 updateMats
= ~ctx
->Light
._ColorMaterialBitmask
;
580 /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
581 updateMats
= ALL_MATERIAL_BITS
;
584 if (ctx
->API
== API_OPENGL_COMPAT
&& face
== GL_FRONT
) {
585 updateMats
&= FRONT_MATERIAL_BITS
;
587 else if (ctx
->API
== API_OPENGL_COMPAT
&& face
== GL_BACK
) {
588 updateMats
&= BACK_MATERIAL_BITS
;
590 else if (face
!= GL_FRONT_AND_BACK
) {
591 _mesa_error(ctx
, GL_INVALID_ENUM
, "glMaterial(invalid face)");
597 if (updateMats
& MAT_BIT_FRONT_EMISSION
)
598 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION
, 4, params
);
599 if (updateMats
& MAT_BIT_BACK_EMISSION
)
600 MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION
, 4, params
);
603 if (updateMats
& MAT_BIT_FRONT_AMBIENT
)
604 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT
, 4, params
);
605 if (updateMats
& MAT_BIT_BACK_AMBIENT
)
606 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT
, 4, params
);
609 if (updateMats
& MAT_BIT_FRONT_DIFFUSE
)
610 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE
, 4, params
);
611 if (updateMats
& MAT_BIT_BACK_DIFFUSE
)
612 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE
, 4, params
);
615 if (updateMats
& MAT_BIT_FRONT_SPECULAR
)
616 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR
, 4, params
);
617 if (updateMats
& MAT_BIT_BACK_SPECULAR
)
618 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR
, 4, params
);
621 if (*params
< 0 || *params
> ctx
->Const
.MaxShininess
) {
622 _mesa_error(ctx
, GL_INVALID_VALUE
,
623 "glMaterial(invalid shininess: %f out range [0, %f])",
624 *params
, ctx
->Const
.MaxShininess
);
627 if (updateMats
& MAT_BIT_FRONT_SHININESS
)
628 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS
, 1, params
);
629 if (updateMats
& MAT_BIT_BACK_SHININESS
)
630 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS
, 1, params
);
632 case GL_COLOR_INDEXES
:
633 if (ctx
->API
!= API_OPENGL_COMPAT
) {
634 _mesa_error(ctx
, GL_INVALID_ENUM
, "glMaterialfv(pname)");
637 if (updateMats
& MAT_BIT_FRONT_INDEXES
)
638 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES
, 3, params
);
639 if (updateMats
& MAT_BIT_BACK_INDEXES
)
640 MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES
, 3, params
);
642 case GL_AMBIENT_AND_DIFFUSE
:
643 if (updateMats
& MAT_BIT_FRONT_AMBIENT
)
644 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT
, 4, params
);
645 if (updateMats
& MAT_BIT_FRONT_DIFFUSE
)
646 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE
, 4, params
);
647 if (updateMats
& MAT_BIT_BACK_AMBIENT
)
648 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT
, 4, params
);
649 if (updateMats
& MAT_BIT_BACK_DIFFUSE
)
650 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE
, 4, params
);
653 _mesa_error(ctx
, GL_INVALID_ENUM
, "glMaterialfv(pname)");
660 * Flush (draw) vertices.
662 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
665 vbo_exec_FlushVertices_internal(struct vbo_exec_context
*exec
, unsigned flags
)
667 struct gl_context
*ctx
= exec
->ctx
;
669 if (flags
& FLUSH_STORED_VERTICES
) {
670 if (exec
->vtx
.vert_count
) {
671 vbo_exec_vtx_flush(exec
);
674 if (exec
->vtx
.vertex_size
) {
675 vbo_exec_copy_to_current(exec
);
676 vbo_reset_all_attr(exec
);
680 ctx
->Driver
.NeedFlush
= 0;
682 assert(flags
== FLUSH_UPDATE_CURRENT
);
684 /* Note that the vertex size is unchanged.
685 * (vbo_reset_all_attr isn't called)
687 vbo_exec_copy_to_current(exec
);
689 /* Only FLUSH_UPDATE_CURRENT is done. */
690 ctx
->Driver
.NeedFlush
= ~FLUSH_UPDATE_CURRENT
;
695 static void GLAPIENTRY
696 vbo_exec_EvalCoord1f(GLfloat u
)
698 GET_CURRENT_CONTEXT(ctx
);
699 struct vbo_exec_context
*exec
= &vbo_context(ctx
)->exec
;
703 if (exec
->eval
.recalculate_maps
)
704 vbo_exec_eval_update(exec
);
706 for (i
= 0; i
<= VBO_ATTRIB_TEX7
; i
++) {
707 if (exec
->eval
.map1
[i
].map
)
708 if (exec
->vtx
.attr
[i
].active_size
!= exec
->eval
.map1
[i
].sz
)
709 vbo_exec_fixup_vertex(ctx
, i
, exec
->eval
.map1
[i
].sz
, GL_FLOAT
);
713 memcpy(exec
->vtx
.copied
.buffer
, exec
->vtx
.vertex
,
714 exec
->vtx
.vertex_size
* sizeof(GLfloat
));
716 vbo_exec_do_EvalCoord1f(exec
, u
);
718 memcpy(exec
->vtx
.vertex
, exec
->vtx
.copied
.buffer
,
719 exec
->vtx
.vertex_size
* sizeof(GLfloat
));
723 static void GLAPIENTRY
724 vbo_exec_EvalCoord2f(GLfloat u
, GLfloat v
)
726 GET_CURRENT_CONTEXT(ctx
);
727 struct vbo_exec_context
*exec
= &vbo_context(ctx
)->exec
;
731 if (exec
->eval
.recalculate_maps
)
732 vbo_exec_eval_update(exec
);
734 for (i
= 0; i
<= VBO_ATTRIB_TEX7
; i
++) {
735 if (exec
->eval
.map2
[i
].map
)
736 if (exec
->vtx
.attr
[i
].active_size
!= exec
->eval
.map2
[i
].sz
)
737 vbo_exec_fixup_vertex(ctx
, i
, exec
->eval
.map2
[i
].sz
, GL_FLOAT
);
740 if (ctx
->Eval
.AutoNormal
)
741 if (exec
->vtx
.attr
[VBO_ATTRIB_NORMAL
].active_size
!= 3)
742 vbo_exec_fixup_vertex(ctx
, VBO_ATTRIB_NORMAL
, 3, GL_FLOAT
);
745 memcpy(exec
->vtx
.copied
.buffer
, exec
->vtx
.vertex
,
746 exec
->vtx
.vertex_size
* sizeof(GLfloat
));
748 vbo_exec_do_EvalCoord2f(exec
, u
, v
);
750 memcpy(exec
->vtx
.vertex
, exec
->vtx
.copied
.buffer
,
751 exec
->vtx
.vertex_size
* sizeof(GLfloat
));
755 static void GLAPIENTRY
756 vbo_exec_EvalCoord1fv(const GLfloat
*u
)
758 vbo_exec_EvalCoord1f(u
[0]);
762 static void GLAPIENTRY
763 vbo_exec_EvalCoord2fv(const GLfloat
*u
)
765 vbo_exec_EvalCoord2f(u
[0], u
[1]);
769 static void GLAPIENTRY
770 vbo_exec_EvalPoint1(GLint i
)
772 GET_CURRENT_CONTEXT(ctx
);
773 GLfloat du
= ((ctx
->Eval
.MapGrid1u2
- ctx
->Eval
.MapGrid1u1
) /
774 (GLfloat
) ctx
->Eval
.MapGrid1un
);
775 GLfloat u
= i
* du
+ ctx
->Eval
.MapGrid1u1
;
777 vbo_exec_EvalCoord1f(u
);
781 static void GLAPIENTRY
782 vbo_exec_EvalPoint2(GLint i
, GLint j
)
784 GET_CURRENT_CONTEXT(ctx
);
785 GLfloat du
= ((ctx
->Eval
.MapGrid2u2
- ctx
->Eval
.MapGrid2u1
) /
786 (GLfloat
) ctx
->Eval
.MapGrid2un
);
787 GLfloat dv
= ((ctx
->Eval
.MapGrid2v2
- ctx
->Eval
.MapGrid2v1
) /
788 (GLfloat
) ctx
->Eval
.MapGrid2vn
);
789 GLfloat u
= i
* du
+ ctx
->Eval
.MapGrid2u1
;
790 GLfloat v
= j
* dv
+ ctx
->Eval
.MapGrid2v1
;
792 vbo_exec_EvalCoord2f(u
, v
);
797 * Called via glBegin.
799 static void GLAPIENTRY
800 vbo_exec_Begin(GLenum mode
)
802 GET_CURRENT_CONTEXT(ctx
);
803 struct vbo_context
*vbo
= vbo_context(ctx
);
804 struct vbo_exec_context
*exec
= &vbo
->exec
;
807 if (_mesa_inside_begin_end(ctx
)) {
808 _mesa_error(ctx
, GL_INVALID_OPERATION
, "glBegin");
812 if (!_mesa_valid_prim_mode(ctx
, mode
, "glBegin")) {
816 if (!_mesa_valid_to_render(ctx
, "glBegin")) {
820 /* Heuristic: attempt to isolate attributes occurring outside
823 * Use FLUSH_STORED_VERTICES, because it updates current attribs and
824 * sets vertex_size to 0. (FLUSH_UPDATE_CURRENT doesn't change vertex_size)
826 if (exec
->vtx
.vertex_size
&& !exec
->vtx
.attr
[VBO_ATTRIB_POS
].size
)
827 vbo_exec_FlushVertices_internal(exec
, FLUSH_STORED_VERTICES
);
829 i
= exec
->vtx
.prim_count
++;
830 exec
->vtx
.prim
[i
].mode
= mode
;
831 exec
->vtx
.prim
[i
].begin
= 1;
832 exec
->vtx
.prim
[i
].end
= 0;
833 exec
->vtx
.prim
[i
].start
= exec
->vtx
.vert_count
;
834 exec
->vtx
.prim
[i
].count
= 0;
836 ctx
->Driver
.CurrentExecPrimitive
= mode
;
838 ctx
->Exec
= ctx
->BeginEnd
;
840 /* We may have been called from a display list, in which case we should
841 * leave dlist.c's dispatch table in place.
843 if (ctx
->CurrentClientDispatch
== ctx
->MarshalExec
) {
844 ctx
->CurrentServerDispatch
= ctx
->Exec
;
845 } else if (ctx
->CurrentClientDispatch
== ctx
->OutsideBeginEnd
) {
846 ctx
->CurrentClientDispatch
= ctx
->Exec
;
847 _glapi_set_dispatch(ctx
->CurrentClientDispatch
);
849 assert(ctx
->CurrentClientDispatch
== ctx
->Save
);
855 * Try to merge / concatenate the two most recent VBO primitives.
858 try_vbo_merge(struct vbo_exec_context
*exec
)
860 struct _mesa_prim
*cur
= &exec
->vtx
.prim
[exec
->vtx
.prim_count
- 1];
862 assert(exec
->vtx
.prim_count
>= 1);
864 vbo_try_prim_conversion(cur
);
866 if (exec
->vtx
.prim_count
>= 2) {
867 struct _mesa_prim
*prev
= &exec
->vtx
.prim
[exec
->vtx
.prim_count
- 2];
868 assert(prev
== cur
- 1);
870 if (vbo_merge_draws(exec
->ctx
, false, prev
, cur
))
871 exec
->vtx
.prim_count
--; /* drop the last primitive */
879 static void GLAPIENTRY
882 GET_CURRENT_CONTEXT(ctx
);
883 struct vbo_exec_context
*exec
= &vbo_context(ctx
)->exec
;
885 if (!_mesa_inside_begin_end(ctx
)) {
886 _mesa_error(ctx
, GL_INVALID_OPERATION
, "glEnd");
890 ctx
->Exec
= ctx
->OutsideBeginEnd
;
892 if (ctx
->CurrentClientDispatch
== ctx
->MarshalExec
) {
893 ctx
->CurrentServerDispatch
= ctx
->Exec
;
894 } else if (ctx
->CurrentClientDispatch
== ctx
->BeginEnd
) {
895 ctx
->CurrentClientDispatch
= ctx
->Exec
;
896 _glapi_set_dispatch(ctx
->CurrentClientDispatch
);
899 if (exec
->vtx
.prim_count
> 0) {
900 /* close off current primitive */
901 struct _mesa_prim
*last_prim
= &exec
->vtx
.prim
[exec
->vtx
.prim_count
- 1];
902 unsigned count
= exec
->vtx
.vert_count
- last_prim
->start
;
905 last_prim
->count
= count
;
908 ctx
->Driver
.NeedFlush
|= FLUSH_STORED_VERTICES
;
910 /* Special handling for GL_LINE_LOOP */
911 if (last_prim
->mode
== GL_LINE_LOOP
&& last_prim
->begin
== 0) {
912 /* We're finishing drawing a line loop. Append 0th vertex onto
913 * end of vertex buffer so we can draw it as a line strip.
915 const fi_type
*src
= exec
->vtx
.buffer_map
+
916 last_prim
->start
* exec
->vtx
.vertex_size
;
917 fi_type
*dst
= exec
->vtx
.buffer_map
+
918 exec
->vtx
.vert_count
* exec
->vtx
.vertex_size
;
920 /* copy 0th vertex to end of buffer */
921 memcpy(dst
, src
, exec
->vtx
.vertex_size
* sizeof(fi_type
));
923 last_prim
->start
++; /* skip vertex0 */
924 /* note that last_prim->count stays unchanged */
925 last_prim
->mode
= GL_LINE_STRIP
;
927 /* Increment the vertex count so the next primitive doesn't
928 * overwrite the last vertex which we just added.
930 exec
->vtx
.vert_count
++;
931 exec
->vtx
.buffer_ptr
+= exec
->vtx
.vertex_size
;
937 ctx
->Driver
.CurrentExecPrimitive
= PRIM_OUTSIDE_BEGIN_END
;
939 if (exec
->vtx
.prim_count
== VBO_MAX_PRIM
)
940 vbo_exec_vtx_flush(exec
);
942 if (MESA_DEBUG_FLAGS
& DEBUG_ALWAYS_FLUSH
) {
949 * Called via glPrimitiveRestartNV()
951 static void GLAPIENTRY
952 vbo_exec_PrimitiveRestartNV(void)
955 GET_CURRENT_CONTEXT(ctx
);
957 curPrim
= ctx
->Driver
.CurrentExecPrimitive
;
959 if (curPrim
== PRIM_OUTSIDE_BEGIN_END
) {
960 _mesa_error(ctx
, GL_INVALID_OPERATION
, "glPrimitiveRestartNV");
964 vbo_exec_Begin(curPrim
);
970 vbo_exec_vtxfmt_init(struct vbo_exec_context
*exec
)
972 struct gl_context
*ctx
= exec
->ctx
;
973 GLvertexformat
*vfmt
= &exec
->vtxfmt
;
975 #define NAME_AE(x) _ae_##x
976 #define NAME_CALLLIST(x) _mesa_##x
977 #define NAME(x) vbo_exec_##x
978 #define NAME_ES(x) _es_##x
980 #include "vbo_init_tmp.h"
985 vbo_reset_all_attr(struct vbo_exec_context
*exec
)
987 while (exec
->vtx
.enabled
) {
988 const int i
= u_bit_scan64(&exec
->vtx
.enabled
);
990 /* Reset the vertex attribute by setting its size to zero. */
991 exec
->vtx
.attr
[i
].size
= 0;
992 exec
->vtx
.attr
[i
].type
= GL_FLOAT
;
993 exec
->vtx
.attr
[i
].active_size
= 0;
994 exec
->vtx
.attrptr
[i
] = NULL
;
997 exec
->vtx
.vertex_size
= 0;
1002 vbo_exec_vtx_init(struct vbo_exec_context
*exec
, bool use_buffer_objects
)
1004 struct gl_context
*ctx
= exec
->ctx
;
1006 if (use_buffer_objects
) {
1007 /* Use buffer objects for immediate mode. */
1008 struct vbo_exec_context
*exec
= &vbo_context(ctx
)->exec
;
1010 exec
->vtx
.bufferobj
= ctx
->Driver
.NewBufferObject(ctx
, IMM_BUFFER_NAME
);
1012 /* Map the buffer. */
1013 vbo_exec_vtx_map(exec
);
1014 assert(exec
->vtx
.buffer_ptr
);
1016 /* Use allocated memory for immediate mode. */
1017 exec
->vtx
.bufferobj
= NULL
;
1018 exec
->vtx
.buffer_map
=
1019 align_malloc(ctx
->Const
.glBeginEndBufferSize
, 64);
1020 exec
->vtx
.buffer_ptr
= exec
->vtx
.buffer_map
;
1023 vbo_exec_vtxfmt_init(exec
);
1024 _mesa_noop_vtxfmt_init(ctx
, &exec
->vtxfmt_noop
);
1026 exec
->vtx
.enabled
= u_bit_consecutive64(0, VBO_ATTRIB_MAX
); /* reset all */
1027 vbo_reset_all_attr(exec
);
1032 vbo_exec_vtx_destroy(struct vbo_exec_context
*exec
)
1034 /* using a real VBO for vertex data */
1035 struct gl_context
*ctx
= exec
->ctx
;
1037 /* True VBOs should already be unmapped
1039 if (exec
->vtx
.buffer_map
) {
1040 assert(!exec
->vtx
.bufferobj
||
1041 exec
->vtx
.bufferobj
->Name
== IMM_BUFFER_NAME
);
1042 if (!exec
->vtx
.bufferobj
) {
1043 align_free(exec
->vtx
.buffer_map
);
1044 exec
->vtx
.buffer_map
= NULL
;
1045 exec
->vtx
.buffer_ptr
= NULL
;
1049 /* Free the vertex buffer. Unmap first if needed.
1051 if (exec
->vtx
.bufferobj
&&
1052 _mesa_bufferobj_mapped(exec
->vtx
.bufferobj
, MAP_INTERNAL
)) {
1053 ctx
->Driver
.UnmapBuffer(ctx
, exec
->vtx
.bufferobj
, MAP_INTERNAL
);
1055 _mesa_reference_buffer_object(ctx
, &exec
->vtx
.bufferobj
, NULL
);
1060 * If inside glBegin()/glEnd(), it should assert(0). Otherwise, if
1061 * FLUSH_STORED_VERTICES bit in \p flags is set flushes any buffered
1062 * vertices, if FLUSH_UPDATE_CURRENT bit is set updates
1063 * __struct gl_contextRec::Current and gl_light_attrib::Material
1065 * Note that the default T&L engine never clears the
1066 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
1068 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
1071 vbo_exec_FlushVertices(struct gl_context
*ctx
, GLuint flags
)
1073 struct vbo_exec_context
*exec
= &vbo_context(ctx
)->exec
;
1076 /* debug check: make sure we don't get called recursively */
1077 exec
->flush_call_depth
++;
1078 assert(exec
->flush_call_depth
== 1);
1081 if (_mesa_inside_begin_end(ctx
)) {
1082 /* We've had glBegin but not glEnd! */
1084 exec
->flush_call_depth
--;
1085 assert(exec
->flush_call_depth
== 0);
1091 vbo_exec_FlushVertices_internal(exec
, flags
);
1094 exec
->flush_call_depth
--;
1095 assert(exec
->flush_call_depth
== 0);
1101 _es_Color4f(GLfloat r
, GLfloat g
, GLfloat b
, GLfloat a
)
1103 vbo_exec_Color4f(r
, g
, b
, a
);
1108 _es_Normal3f(GLfloat x
, GLfloat y
, GLfloat z
)
1110 vbo_exec_Normal3f(x
, y
, z
);
1115 _es_MultiTexCoord4f(GLenum target
, GLfloat s
, GLfloat t
, GLfloat r
, GLfloat q
)
1117 vbo_exec_MultiTexCoord4f(target
, s
, t
, r
, q
);
1122 _es_Materialfv(GLenum face
, GLenum pname
, const GLfloat
*params
)
1124 vbo_exec_Materialfv(face
, pname
, params
);
1129 _es_Materialf(GLenum face
, GLenum pname
, GLfloat param
)
1133 p
[1] = p
[2] = p
[3] = 0.0F
;
1134 vbo_exec_Materialfv(face
, pname
, p
);
1139 * A special version of glVertexAttrib4f that does not treat index 0 as
1143 VertexAttrib4f_nopos(GLuint index
, GLfloat x
, GLfloat y
, GLfloat z
, GLfloat w
)
1145 GET_CURRENT_CONTEXT(ctx
);
1146 if (index
< MAX_VERTEX_GENERIC_ATTRIBS
)
1147 ATTRF(VBO_ATTRIB_GENERIC0
+ index
, 4, x
, y
, z
, w
);
1149 ERROR(GL_INVALID_VALUE
);
1153 _es_VertexAttrib4f(GLuint index
, GLfloat x
, GLfloat y
, GLfloat z
, GLfloat w
)
1155 VertexAttrib4f_nopos(index
, x
, y
, z
, w
);
1160 _es_VertexAttrib1f(GLuint indx
, GLfloat x
)
1162 VertexAttrib4f_nopos(indx
, x
, 0.0f
, 0.0f
, 1.0f
);
1167 _es_VertexAttrib1fv(GLuint indx
, const GLfloat
* values
)
1169 VertexAttrib4f_nopos(indx
, values
[0], 0.0f
, 0.0f
, 1.0f
);
1174 _es_VertexAttrib2f(GLuint indx
, GLfloat x
, GLfloat y
)
1176 VertexAttrib4f_nopos(indx
, x
, y
, 0.0f
, 1.0f
);
1181 _es_VertexAttrib2fv(GLuint indx
, const GLfloat
* values
)
1183 VertexAttrib4f_nopos(indx
, values
[0], values
[1], 0.0f
, 1.0f
);
1188 _es_VertexAttrib3f(GLuint indx
, GLfloat x
, GLfloat y
, GLfloat z
)
1190 VertexAttrib4f_nopos(indx
, x
, y
, z
, 1.0f
);
1195 _es_VertexAttrib3fv(GLuint indx
, const GLfloat
* values
)
1197 VertexAttrib4f_nopos(indx
, values
[0], values
[1], values
[2], 1.0f
);
1202 _es_VertexAttrib4fv(GLuint indx
, const GLfloat
* values
)
1204 VertexAttrib4f_nopos(indx
, values
[0], values
[1], values
[2], values
[3]);