1 /**************************************************************************
3 Copyright 2002-2008 VMware, Inc.
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 on the rights to use, copy, modify, merge, publish, distribute, sub
11 license, and/or sell copies of the Software, and to permit persons to whom
12 the Software is furnished to do so, subject to the following conditions:
14 The above copyright notice and this permission notice (including the next
15 paragraph) shall be included in all copies or substantial portions of the
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 * Keith Whitwell <keithw@vmware.com>
33 #include "main/glheader.h"
34 #include "main/bufferobj.h"
35 #include "main/context.h"
36 #include "main/macros.h"
37 #include "main/vtxfmt.h"
38 #include "main/dlist.h"
39 #include "main/eval.h"
40 #include "main/state.h"
41 #include "main/light.h"
42 #include "main/api_arrayelt.h"
43 #include "main/draw_validate.h"
44 #include "main/dispatch.h"
45 #include "util/bitscan.h"
48 #include "vbo_private.h"
51 /** ID/name for immediate-mode VBO */
52 #define IMM_BUFFER_NAME 0xaabbccdd
56 vbo_reset_all_attr(struct vbo_exec_context
*exec
);
/**
 * Close off the last primitive, execute the buffer, restart the
 * primitive.  This is called when we fill a vertex buffer before
 * hitting glEnd.
 */
static void
vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
{
   if (exec->vtx.prim_count == 0) {
      /* Nothing queued: just rewind the buffer to its start. */
      exec->vtx.copied.nr = 0;
      exec->vtx.vert_count = 0;
      exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   }
   else {
      struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
      const GLuint last_begin = last_prim->begin;
      GLuint last_count;

      if (_mesa_inside_begin_end(exec->ctx)) {
         /* The open primitive extends to the last vertex emitted so far. */
         last_prim->count = exec->vtx.vert_count - last_prim->start;
      }

      last_count = last_prim->count;

      /* Special handling for wrapping GL_LINE_LOOP */
      if (last_prim->mode == GL_LINE_LOOP &&
          last_count > 0 &&
          !last_prim->end) {
         /* draw this section of the incomplete line loop as a line strip */
         last_prim->mode = GL_LINE_STRIP;
         if (!last_prim->begin) {
            /* This is not the first section of the line loop, so don't
             * draw the 0th vertex.  We're saving it until we draw the
             * very last section of the loop.
             */
            last_prim->start++;
         }
      }

      /* Execute the buffer and save copied vertices.
       */
      if (exec->vtx.vert_count)
         vbo_exec_vtx_flush(exec, GL_FALSE);
      else {
         exec->vtx.prim_count = 0;
         exec->vtx.copied.nr = 0;
      }

      /* Emit a glBegin to start the new list.
       */
      assert(exec->vtx.prim_count == 0);

      if (_mesa_inside_begin_end(exec->ctx)) {
         /* Re-open the interrupted primitive in the fresh buffer. */
         exec->vtx.prim[0].mode = exec->ctx->Driver.CurrentExecPrimitive;
         exec->vtx.prim[0].begin = 0;
         exec->vtx.prim[0].end = 0;
         exec->vtx.prim[0].start = 0;
         exec->vtx.prim[0].count = 0;
         exec->vtx.prim_count++;

         /* If every vertex of the old primitive was carried over, preserve
          * the original begin flag so the primitive is not considered
          * "continued" (relevant for GL_LINE_LOOP handling above).
          */
         if (exec->vtx.copied.nr == last_count)
            exec->vtx.prim[0].begin = last_begin;
      }
   }
}
/**
 * Deal with buffer wrapping where provoked by the vertex buffer
 * filling up, as opposed to upgrade_vertex().
 */
void
vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
{
   unsigned numComponents;

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (!exec->vtx.buffer_ptr) {
      /* probably ran out of memory earlier when allocating the VBO */
      return;
   }

   /* Copy stored stored vertices to start of new list.
    */
   assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);

   /* copied.nr vertices, each vertex_size floats wide */
   numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
   memcpy(exec->vtx.buffer_ptr,
          exec->vtx.copied.buffer,
          numComponents * sizeof(fi_type));
   exec->vtx.buffer_ptr += numComponents;
   exec->vtx.vert_count += exec->vtx.copied.nr;

   exec->vtx.copied.nr = 0;
}
/**
 * Copy the active vertex's values to the ctx->Current fields.
 */
static void
vbo_exec_copy_to_current(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   /* Skip position: glVertex does not update "current" state. */
   GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));

   while (enabled) {
      const int i = u_bit_scan64(&enabled);

      /* Note: the exec->vtx.current[i] pointers point into the
       * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
       */
      GLfloat *current = (GLfloat *)vbo->current[i].Ptr;
      fi_type tmp[8]; /* space for doubles */
      int dmul = 1;   /* number of 32-bit words per component */

      if (exec->vtx.attrtype[i] == GL_DOUBLE ||
          exec->vtx.attrtype[i] == GL_UNSIGNED_INT64_ARB)
         dmul = 2;

      assert(exec->vtx.attrsz[i]);

      if (exec->vtx.attrtype[i] == GL_DOUBLE ||
          exec->vtx.attrtype[i] == GL_UNSIGNED_INT64_ARB) {
         /* 64-bit data: raw copy, zero-fill the rest of tmp */
         memset(tmp, 0, sizeof(tmp));
         memcpy(tmp, exec->vtx.attrptr[i],
                exec->vtx.attrsz[i] * sizeof(GLfloat));
      } else {
         /* 32-bit data: expand to 4 components with (0,0,0,1) defaults */
         COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
                                     exec->vtx.attrsz[i],
                                     exec->vtx.attrptr[i],
                                     exec->vtx.attrtype[i]);
      }

      if (exec->vtx.attrtype[i] != vbo->current[i].Format.Type ||
          memcmp(current, tmp, 4 * sizeof(GLfloat) * dmul) != 0) {
         memcpy(current, tmp, 4 * sizeof(GLfloat) * dmul);

         /* Given that we explicitly state size here, there is no need
          * for the COPY_CLEAN above, could just copy 16 bytes and be
          * done.  The only problem is when Mesa accesses ctx->Current
          * directly.
          */
         /* Size here is in components - not bytes */
         vbo_set_vertex_format(&vbo->current[i].Format,
                               exec->vtx.attrsz[i] / dmul,
                               exec->vtx.attrtype[i]);

         /* This triggers rather too much recalculation of Mesa state
          * that doesn't get used (eg light positions).
          */
         if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT &&
             i <= VBO_ATTRIB_MAT_BACK_INDEXES)
            ctx->NewState |= _NEW_LIGHT;

         ctx->NewState |= _NEW_CURRENT_ATTRIB;
      }
   }

   /* Colormaterial -- this kindof sucks.
    */
   if (ctx->Light.ColorMaterialEnabled &&
       exec->vtx.attrsz[VBO_ATTRIB_COLOR0]) {
      _mesa_update_color_material(ctx,
                                  ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
   }
}
/**
 * Copy current vertex attribute values into the current vertex.
 */
static void
vbo_exec_copy_from_current(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   GLint i;

   /* Position (VBO_ATTRIB_POS) is deliberately skipped. */
   for (i = VBO_ATTRIB_POS + 1; i < VBO_ATTRIB_MAX; i++) {
      if (exec->vtx.attrtype[i] == GL_DOUBLE ||
          exec->vtx.attrtype[i] == GL_UNSIGNED_INT64_ARB) {
         /* 64-bit data: attrsz counts 32-bit words, so this copies the
          * whole attribute.
          */
         memcpy(exec->vtx.attrptr[i], vbo->current[i].Ptr,
                exec->vtx.attrsz[i] * sizeof(GLfloat));
      } else {
         const fi_type *current = (fi_type *) vbo->current[i].Ptr;
         /* Deliberate cascade: copy exactly attrsz[i] components. */
         switch (exec->vtx.attrsz[i]) {
         case 4: exec->vtx.attrptr[i][3] = current[3];
            /* fallthrough */
         case 3: exec->vtx.attrptr[i][2] = current[2];
            /* fallthrough */
         case 2: exec->vtx.attrptr[i][1] = current[1];
            /* fallthrough */
         case 1: exec->vtx.attrptr[i][0] = current[0];
            break;
         }
      }
   }
}
/**
 * Flush existing data, set new attrib size, replay copied vertices.
 * This is called when we transition from a small vertex attribute size
 * to a larger one.  Ex: glTexCoord2f -> glTexCoord4f.
 * We need to go back over the previous 2-component texcoords and insert
 * zero and one values.
 * \param attr  VBO_ATTRIB_x vertex attribute value
 * \param newSize  new attribute size in 32-bit words
 */
static void
vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
                             GLuint attr, GLuint newSize)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   const GLint lastcount = exec->vtx.vert_count;
   fi_type *old_attrptr[VBO_ATTRIB_MAX];
   const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
   const GLuint oldSize = exec->vtx.attrsz[attr];
   GLuint i;

   assert(attr < VBO_ATTRIB_MAX);

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (unlikely(exec->vtx.copied.nr)) {
      /* We're in the middle of a primitive, keep the old vertex
       * format around to be able to translate the copied vertices to
       * the new format.
       */
      memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
   }

   if (unlikely(oldSize)) {
      /* Do a COPY_TO_CURRENT to ensure back-copying works for the
       * case when the attribute already exists in the vertex and is
       * having its size increased.
       */
      vbo_exec_copy_to_current(exec);
   }

   /* Heuristic: Attempt to isolate attributes received outside
    * begin/end so that they don't bloat the vertices.
    */
   if (!_mesa_inside_begin_end(ctx) &&
       !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
      vbo_exec_copy_to_current(exec);
      vbo_reset_all_attr(exec);
   }

   /* Fix up sizes and layout for the enlarged vertex. */
   exec->vtx.attrsz[attr] = newSize;
   exec->vtx.vertex_size += newSize - oldSize;
   exec->vtx.max_vert = vbo_compute_max_verts(exec);
   exec->vtx.vert_count = 0;
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   exec->vtx.enabled |= BITFIELD64_BIT(attr);

   if (unlikely(oldSize)) {
      /* Size changed, recalculate all the attrptr[] values
       */
      fi_type *tmp = exec->vtx.vertex;

      for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) {
         if (exec->vtx.attrsz[i]) {
            exec->vtx.attrptr[i] = tmp;
            tmp += exec->vtx.attrsz[i];
         }
         else
            exec->vtx.attrptr[i] = NULL; /* will not be dereferenced */
      }

      /* Copy from current to repopulate the vertex with correct
       * values.
       */
      vbo_exec_copy_from_current(exec);
   }
   else {
      /* Just have to append the new attribute at the end */
      exec->vtx.attrptr[attr] = exec->vtx.vertex +
         exec->vtx.vertex_size - newSize;
   }

   /* Replay stored vertices to translate them
    * to new format here.
    *
    * -- No need to replay - just copy piecewise
    */
   if (unlikely(exec->vtx.copied.nr)) {
      fi_type *data = exec->vtx.copied.buffer;
      fi_type *dest = exec->vtx.buffer_ptr;

      assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);

      for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
         GLbitfield64 enabled = exec->vtx.enabled;
         while (enabled) {
            const int j = u_bit_scan64(&enabled);
            GLuint sz = exec->vtx.attrsz[j];
            GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
            GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;

            assert(sz);

            if (j == attr) {
               if (oldSize) {
                  /* Expand the old (smaller) value to the new size,
                   * filling missing components with defaults.
                   */
                  fi_type tmp[4];
                  COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
                                              data + old_offset,
                                              exec->vtx.attrtype[j]);
                  COPY_SZ_4V(dest + new_offset, newSize, tmp);
               } else {
                  /* Attribute didn't exist before: take its value from
                   * the current-attribute state.
                   */
                  fi_type *current = (fi_type *)vbo->current[j].Ptr;
                  COPY_SZ_4V(dest + new_offset, sz, current);
               }
            }
            else {
               /* Unchanged attribute: straight copy at the new offset. */
               COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
            }
         }

         data += old_vtx_size;
         dest += exec->vtx.vertex_size;
      }

      exec->vtx.buffer_ptr = dest;
      exec->vtx.vert_count += exec->vtx.copied.nr;
      exec->vtx.copied.nr = 0;
   }
}
/**
 * This is when a vertex attribute transitions to a different size.
 * For example, we saw a bunch of glTexCoord2f() calls and now we got a
 * glTexCoord4f() call.  We promote the array from size=2 to size=4.
 * \param newSize size of new vertex (number of 32-bit words).
 * \param attr VBO_ATTRIB_x vertex attribute value
 */
static void
vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
                      GLuint newSize, GLenum newType)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   assert(attr < VBO_ATTRIB_MAX);

   if (newSize > exec->vtx.attrsz[attr] ||
       newType != exec->vtx.attrtype[attr]) {
      /* New size is larger.  Need to flush existing vertices and get
       * an enlarged vertex format.
       */
      vbo_exec_wrap_upgrade_vertex(exec, attr, newSize);
   }
   else if (newSize < exec->vtx.active_sz[attr]) {
      GLuint i;
      const fi_type *id =
         vbo_get_default_vals_as_union(exec->vtx.attrtype[attr]);

      /* New size is smaller - just need to fill in some
       * zeros.  Don't need to flush or wrap.
       */
      for (i = newSize; i <= exec->vtx.attrsz[attr]; i++)
         exec->vtx.attrptr[attr][i-1] = id[i-1];
   }

   exec->vtx.active_sz[attr] = newSize;
   exec->vtx.attrtype[attr] = newType;

   /* Does setting NeedFlush belong here?  Necessitates resetting
    * vtxfmt on each flush (otherwise flags won't get reset
    * afterwards).
    */
   if (attr == 0)
      ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
}
/**
 * Called upon first glVertex, glColor, glTexCoord, etc. after a flush,
 * to (re)arm the NeedFlush flags and make sure the vertex buffer is mapped.
 */
static void
vbo_exec_begin_vertices(struct gl_context *ctx)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   if (unlikely(!exec->vtx.buffer_ptr))
      vbo_exec_vtx_map(exec);

   assert((ctx->Driver.NeedFlush & FLUSH_UPDATE_CURRENT) == 0);
   assert(exec->begin_vertices_flags);

   ctx->Driver.NeedFlush |= exec->begin_vertices_flags;
}
/**
 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
 * It depends on a few things, including whether we're inside or outside
 * of glBegin/glEnd.
 */
static inline bool
is_vertex_position(const struct gl_context *ctx, GLuint index)
{
   return (index == 0 &&
           _mesa_attr_zero_aliases_vertex(ctx) &&
           _mesa_inside_begin_end(ctx));
}
/**
 * This macro is used to implement all the glVertex, glColor, glTexCoord,
 * glVertexAttrib, etc functions.
 * \param A  VBO_ATTRIB_x attribute index
 * \param N  attribute size (1..4)
 * \param T  type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
 * \param C  cast type (fi_type or double)
 * \param V0, V1, v2, V3  attribute value
 */
#define ATTR_UNION(A, N, T, C, V0, V1, V2, V3)                          \
do {                                                                    \
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;             \
   int sz = (sizeof(C) / sizeof(GLfloat));                              \
                                                                        \
   assert(sz == 1 || sz == 2);                                          \
                                                                        \
   /* check if attribute size or type is changing */                    \
   if (unlikely(exec->vtx.active_sz[A] != N * sz) ||                    \
       unlikely(exec->vtx.attrtype[A] != T)) {                          \
      vbo_exec_fixup_vertex(ctx, A, N * sz, T);                         \
   }                                                                    \
                                                                        \
   /* store vertex attribute in vertex buffer */                        \
   {                                                                    \
      C *dest = (C *)exec->vtx.attrptr[A];                              \
      if (N>0) dest[0] = V0;                                            \
      if (N>1) dest[1] = V1;                                            \
      if (N>2) dest[2] = V2;                                            \
      if (N>3) dest[3] = V3;                                            \
      assert(exec->vtx.attrtype[A] == T);                               \
   }                                                                    \
                                                                        \
   if ((A) == 0) {                                                      \
      /* This is a glVertex call */                                     \
      GLuint i;                                                         \
                                                                        \
      if (unlikely((ctx->Driver.NeedFlush & FLUSH_UPDATE_CURRENT) == 0)) { \
         vbo_exec_begin_vertices(ctx);                                  \
      }                                                                 \
                                                                        \
      if (unlikely(!exec->vtx.buffer_ptr)) {                            \
         vbo_exec_vtx_map(exec);                                        \
      }                                                                 \
      assert(exec->vtx.buffer_ptr);                                     \
                                                                        \
      /* copy 32-bit words */                                           \
      for (i = 0; i < exec->vtx.vertex_size; i++)                       \
         exec->vtx.buffer_ptr[i] = exec->vtx.vertex[i];                 \
                                                                        \
      exec->vtx.buffer_ptr += exec->vtx.vertex_size;                    \
                                                                        \
      /* Set FLUSH_STORED_VERTICES to indicate that there's now */      \
      /* something to draw (not just updating a color or texcoord).*/   \
      ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;                   \
                                                                        \
      if (++exec->vtx.vert_count >= exec->vtx.max_vert)                 \
         vbo_exec_vtx_wrap(exec);                                       \
   } else {                                                             \
      /* we now have accumulated per-vertex attributes */               \
      ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT;                    \
   }                                                                    \
} while (0)
540 #define ERROR(err) _mesa_error(ctx, err, __func__)
541 #define TAG(x) vbo_exec_##x
543 #include "vbo_attrib_tmp.h"
/**
 * Execute a glMaterial call.  Note that if GL_COLOR_MATERIAL is enabled,
 * this may be a (partial) no-op.
 */
static void GLAPIENTRY
vbo_exec_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   GLbitfield updateMats;
   GET_CURRENT_CONTEXT(ctx);

   /* This function should be a no-op when it tries to update material
    * attributes which are currently tracking glColor via glColorMaterial.
    * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
    * indicating which material attributes can actually be updated below.
    */
   if (ctx->Light.ColorMaterialEnabled) {
      updateMats = ~ctx->Light._ColorMaterialBitmask;
   }
   else {
      /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
      updateMats = ALL_MATERIAL_BITS;
   }

   if (ctx->API == API_OPENGL_COMPAT && face == GL_FRONT) {
      updateMats &= FRONT_MATERIAL_BITS;
   }
   else if (ctx->API == API_OPENGL_COMPAT && face == GL_BACK) {
      updateMats &= BACK_MATERIAL_BITS;
   }
   else if (face != GL_FRONT_AND_BACK) {
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
      return;
   }

   switch (pname) {
   case GL_EMISSION:
      if (updateMats & MAT_BIT_FRONT_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
      if (updateMats & MAT_BIT_BACK_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
      break;
   case GL_AMBIENT:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      break;
   case GL_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   case GL_SPECULAR:
      if (updateMats & MAT_BIT_FRONT_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
      if (updateMats & MAT_BIT_BACK_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
      break;
   case GL_SHININESS:
      /* Spec requires shininess in [0, MaxShininess]. */
      if (*params < 0 || *params > ctx->Const.MaxShininess) {
         _mesa_error(ctx, GL_INVALID_VALUE,
                     "glMaterial(invalid shininess: %f out range [0, %f])",
                     *params, ctx->Const.MaxShininess);
         return;
      }
      if (updateMats & MAT_BIT_FRONT_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
      if (updateMats & MAT_BIT_BACK_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
      break;
   case GL_COLOR_INDEXES:
      /* Color-index materials exist only in the compatibility profile. */
      if (ctx->API != API_OPENGL_COMPAT) {
         _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
         return;
      }
      if (updateMats & MAT_BIT_FRONT_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
      if (updateMats & MAT_BIT_BACK_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
      break;
   case GL_AMBIENT_AND_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   default:
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
      return;
   }
}
/**
 * Flush (draw) vertices.
 * \param unmap - leave VBO unmapped after flushing?
 */
static void
vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, GLboolean unmap)
{
   if (exec->vtx.vert_count || unmap) {
      vbo_exec_vtx_flush(exec, unmap);
   }

   if (exec->vtx.vertex_size) {
      /* Propagate the last vertex's attributes to ctx->Current, then
       * shrink the vertex back to empty.
       */
      vbo_exec_copy_to_current(exec);
      vbo_reset_all_attr(exec);
   }
}
/**
 * Called via glEvalCoord1f: evaluate enabled 1D maps at parameter u
 * and emit the resulting vertex.
 */
static void GLAPIENTRY
vbo_exec_EvalCoord1f(GLfloat u)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure each attribute written by evaluation has the size the
       * map will produce.
       */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map1[i].map)
            if (exec->vtx.active_sz[i] != exec->eval.map1[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
      }
   }

   /* Save the current vertex so the evaluation doesn't clobber it. */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord1f(exec, u);

   /* Restore the saved vertex. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
/**
 * Called via glEvalCoord2f: evaluate enabled 2D maps at (u, v)
 * and emit the resulting vertex.
 */
static void GLAPIENTRY
vbo_exec_EvalCoord2f(GLfloat u, GLfloat v)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure each attribute written by evaluation has the size the
       * map will produce.
       */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map2[i].map)
            if (exec->vtx.active_sz[i] != exec->eval.map2[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
      }

      /* GL_AUTO_NORMAL generates 3-component normals. */
      if (ctx->Eval.AutoNormal)
         if (exec->vtx.active_sz[VBO_ATTRIB_NORMAL] != 3)
            vbo_exec_fixup_vertex(ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT);
   }

   /* Save the current vertex so the evaluation doesn't clobber it. */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord2f(exec, u, v);

   /* Restore the saved vertex. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
/** Vector form of glEvalCoord1f. */
static void GLAPIENTRY
vbo_exec_EvalCoord1fv(const GLfloat *u)
{
   vbo_exec_EvalCoord1f(u[0]);
}
/** Vector form of glEvalCoord2f. */
static void GLAPIENTRY
vbo_exec_EvalCoord2fv(const GLfloat *u)
{
   vbo_exec_EvalCoord2f(u[0], u[1]);
}
/**
 * Called via glEvalPoint1: map grid index i to parameter u
 * and evaluate at that point.
 */
static void GLAPIENTRY
vbo_exec_EvalPoint1(GLint i)
{
   GET_CURRENT_CONTEXT(ctx);
   GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
                 (GLfloat) ctx->Eval.MapGrid1un);
   GLfloat u = i * du + ctx->Eval.MapGrid1u1;

   vbo_exec_EvalCoord1f(u);
}
/**
 * Called via glEvalPoint2: map grid indices (i, j) to parameters (u, v)
 * and evaluate at that point.
 */
static void GLAPIENTRY
vbo_exec_EvalPoint2(GLint i, GLint j)
{
   GET_CURRENT_CONTEXT(ctx);
   GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
                 (GLfloat) ctx->Eval.MapGrid2un);
   GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
                 (GLfloat) ctx->Eval.MapGrid2vn);
   GLfloat u = i * du + ctx->Eval.MapGrid2u1;
   GLfloat v = j * dv + ctx->Eval.MapGrid2v1;

   vbo_exec_EvalCoord2f(u, v);
}
/**
 * Called via glBegin.
 */
static void GLAPIENTRY
vbo_exec_Begin(GLenum mode)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_context *vbo = vbo_context(ctx);
   struct vbo_exec_context *exec = &vbo->exec;
   int i;

   if (_mesa_inside_begin_end(ctx)) {
      /* Nested glBegin is an error. */
      _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
      return;
   }

   if (!_mesa_valid_prim_mode(ctx, mode, "glBegin")) {
      return;
   }

   if (ctx->NewState) {
      /* State is dirty: validate it, then re-dispatch glBegin through the
       * (possibly updated) exec table.
       */
      _mesa_update_state(ctx);

      CALL_Begin(ctx->Exec, (mode));
      return;
   }

   if (!_mesa_valid_to_render(ctx, "glBegin")) {
      return;
   }

   /* Heuristic: attempt to isolate attributes occurring outside
    * begin/end pairs.
    */
   if (exec->vtx.vertex_size && !exec->vtx.attrsz[0])
      vbo_exec_FlushVertices_internal(exec, GL_FALSE);

   /* Start a new primitive record. */
   i = exec->vtx.prim_count++;
   exec->vtx.prim[i].mode = mode;
   exec->vtx.prim[i].begin = 1;
   exec->vtx.prim[i].end = 0;
   exec->vtx.prim[i].indexed = 0;
   exec->vtx.prim[i].pad = 0;
   exec->vtx.prim[i].start = exec->vtx.vert_count;
   exec->vtx.prim[i].count = 0;
   exec->vtx.prim[i].num_instances = 1;
   exec->vtx.prim[i].base_instance = 0;
   exec->vtx.prim[i].is_indirect = 0;

   ctx->Driver.CurrentExecPrimitive = mode;

   /* Switch to the begin/end dispatch table. */
   ctx->Exec = ctx->BeginEnd;

   /* We may have been called from a display list, in which case we should
    * leave dlist.c's dispatch table in place.
    */
   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->OutsideBeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   } else {
      assert(ctx->CurrentClientDispatch == ctx->Save);
   }
}
/**
 * Try to merge / concatenate the two most recent VBO primitives.
 */
static void
try_vbo_merge(struct vbo_exec_context *exec)
{
   struct _mesa_prim *cur = &exec->vtx.prim[exec->vtx.prim_count - 1];

   assert(exec->vtx.prim_count >= 1);

   vbo_try_prim_conversion(cur);

   if (exec->vtx.prim_count >= 2) {
      struct _mesa_prim *prev = &exec->vtx.prim[exec->vtx.prim_count - 2];
      assert(prev == cur - 1);

      if (vbo_can_merge_prims(prev, cur)) {
         /* The last primitive simply extends the previous one: tack it on
          * and drop the now-redundant primitive record.
          */
         vbo_merge_prims(prev, cur);
         exec->vtx.prim_count--; /* drop the last primitive */
      }
   }
}
/**
 * Called via glEnd.
 */
static void GLAPIENTRY
vbo_exec_End(void)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   if (!_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
      return;
   }

   /* Restore the outside-begin/end dispatch table. */
   ctx->Exec = ctx->OutsideBeginEnd;

   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->BeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   }

   if (exec->vtx.prim_count > 0) {
      /* close off current primitive */
      struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];

      last_prim->end = 1;
      last_prim->count = exec->vtx.vert_count - last_prim->start;

      /* Special handling for GL_LINE_LOOP */
      if (last_prim->mode == GL_LINE_LOOP && last_prim->begin == 0) {
         /* We're finishing drawing a line loop.  Append 0th vertex onto
          * end of vertex buffer so we can draw it as a line strip.
          */
         const fi_type *src = exec->vtx.buffer_map +
            last_prim->start * exec->vtx.vertex_size;
         fi_type *dst = exec->vtx.buffer_map +
            exec->vtx.vert_count * exec->vtx.vertex_size;

         /* copy 0th vertex to end of buffer */
         memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));

         last_prim->start++;  /* skip vertex0 */
         /* note that last_prim->count stays unchanged */
         last_prim->mode = GL_LINE_STRIP;

         /* Increment the vertex count so the next primitive doesn't
          * overwrite the last vertex which we just added.
          */
         exec->vtx.vert_count++;
         exec->vtx.buffer_ptr += exec->vtx.vertex_size;
      }

      try_vbo_merge(exec);
   }

   ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;

   if (exec->vtx.prim_count == VBO_MAX_PRIM)
      vbo_exec_vtx_flush(exec, GL_FALSE);

   if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
      _mesa_flush(ctx);
   }
}
/**
 * Called via glPrimitiveRestartNV().  Implemented as an End/Begin pair
 * of the current primitive mode.
 */
static void GLAPIENTRY
vbo_exec_PrimitiveRestartNV(void)
{
   GLenum curPrim;
   GET_CURRENT_CONTEXT(ctx);

   curPrim = ctx->Driver.CurrentExecPrimitive;

   if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
      /* Only legal between glBegin and glEnd. */
      _mesa_error(ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV");
   }
   else {
      vbo_exec_End();
      vbo_exec_Begin(curPrim);
   }
}
/**
 * Fill in exec->vtxfmt with the immediate-mode entrypoints defined in this
 * file, via the shared vertex-format initialization template.
 */
static void
vbo_exec_vtxfmt_init(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   GLvertexformat *vfmt = &exec->vtxfmt;

   /* Name-mangling macros consumed by vbo_init_tmp.h below. */
#define NAME_AE(x) _ae_##x
#define NAME_CALLLIST(x) _mesa_##x
#define NAME(x) vbo_exec_##x
#define NAME_ES(x) _es_##x

#include "vbo_init_tmp.h"
}
/**
 * Tell the VBO module to use a real OpenGL vertex buffer object to
 * store accumulated immediate-mode vertex data.
 * This replaces the malloced buffer which was created in
 * vb_exec_vtx_init() below.
 */
void
vbo_use_buffer_objects(struct gl_context *ctx)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
   /* Any buffer name but 0 can be used here since this bufferobj won't
    * go into the bufferobj hashtable.
    */
   GLuint bufName = IMM_BUFFER_NAME;

   /* Make sure this func is only used once */
   assert(exec->vtx.bufferobj == ctx->Shared->NullBufferObj);

   /* Release the malloced fallback buffer. */
   _mesa_align_free(exec->vtx.buffer_map);
   exec->vtx.buffer_map = NULL;
   exec->vtx.buffer_ptr = NULL;

   /* Allocate a real buffer object now */
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
   exec->vtx.bufferobj = ctx->Driver.NewBufferObject(ctx, bufName);
}
/**
 * If this function is called, all VBO buffers will be unmapped when
 * the flush happens.
 * Otherwise, if a simple command like glColor3f() is called and we flush,
 * the current VBO may be left mapped.
 */
void
vbo_always_unmap_buffers(struct gl_context *ctx)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
   exec->begin_vertices_flags |= FLUSH_STORED_VERTICES;
}
/**
 * One-time initialization of the immediate-mode exec state: buffer object,
 * malloced vertex store, dispatch tables and per-attribute bookkeeping.
 */
void
vbo_exec_vtx_init(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   GLuint i;

   /* Allocate a buffer object.  Will just reuse this object
    * continuously, unless vbo_use_buffer_objects() is called to enable
    * use of real VBOs.
    */
   _mesa_reference_buffer_object(ctx,
                                 &exec->vtx.bufferobj,
                                 ctx->Shared->NullBufferObj);

   assert(!exec->vtx.buffer_map);
   exec->vtx.buffer_map = _mesa_align_malloc(VBO_VERT_BUFFER_SIZE, 64);
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;

   vbo_exec_vtxfmt_init(exec);
   _mesa_noop_vtxfmt_init(ctx, &exec->vtxfmt_noop);

   /* No attributes active initially. */
   exec->vtx.enabled = 0;
   for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) {
      assert(i < ARRAY_SIZE(exec->vtx.attrsz));
      exec->vtx.attrsz[i] = 0;
      assert(i < ARRAY_SIZE(exec->vtx.attrtype));
      exec->vtx.attrtype[i] = GL_FLOAT;
      assert(i < ARRAY_SIZE(exec->vtx.active_sz));
      exec->vtx.active_sz[i] = 0;
   }

   exec->vtx.vertex_size = 0;

   exec->begin_vertices_flags = FLUSH_UPDATE_CURRENT;
}
/**
 * Tear down the immediate-mode exec state: free the malloced vertex store
 * (or unmap the real VBO) and release the buffer object reference.
 */
void
vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
{
   /* using a real VBO for vertex data */
   struct gl_context *ctx = exec->ctx;

   /* True VBOs should already be unmapped
    */
   if (exec->vtx.buffer_map) {
      assert(exec->vtx.bufferobj->Name == 0 ||
             exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
      if (exec->vtx.bufferobj->Name == 0) {
         /* Name 0 means the malloced fallback store: free it directly. */
         _mesa_align_free(exec->vtx.buffer_map);
         exec->vtx.buffer_map = NULL;
         exec->vtx.buffer_ptr = NULL;
      }
   }

   /* Free the vertex buffer.  Unmap first if needed.
    */
   if (_mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
      ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
   }
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
}
/**
 * If inside glBegin()/glEnd(), it should assert(0).  Otherwise, if
 * FLUSH_STORED_VERTICES bit in \p flags is set flushes any buffered
 * vertices, if FLUSH_UPDATE_CURRENT bit is set updates
 * __struct gl_contextRec::Current and gl_light_attrib::Material
 *
 * Note that the default T&L engine never clears the
 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
 *
 * \param flags  bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
 */
void
vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

#ifndef NDEBUG
   /* debug check: make sure we don't get called recursively */
   exec->flush_call_depth++;
   assert(exec->flush_call_depth == 1);
#endif

   if (_mesa_inside_begin_end(ctx)) {
      /* We've had glBegin but not glEnd! */
#ifndef NDEBUG
      exec->flush_call_depth--;
      assert(exec->flush_call_depth == 0);
#endif
      return;
   }

   /* Flush (draw), and make sure VBO is left unmapped when done */
   vbo_exec_FlushVertices_internal(exec, GL_TRUE);

   /* Need to do this to ensure vbo_exec_begin_vertices gets called again:
    */
   ctx->Driver.NeedFlush &= ~(FLUSH_UPDATE_CURRENT | flags);

#ifndef NDEBUG
   exec->flush_call_depth--;
   assert(exec->flush_call_depth == 0);
#endif
}
/**
 * Reset the vertex attribute by setting its size to zero.
 */
static void
vbo_reset_attr(struct vbo_exec_context *exec, GLuint attr)
{
   exec->vtx.attrsz[attr] = 0;
   exec->vtx.attrtype[attr] = GL_FLOAT;
   exec->vtx.active_sz[attr] = 0;
}
/**
 * Reset all currently-enabled vertex attributes and shrink the vertex
 * to zero size.  Clears exec->vtx.enabled as a side effect.
 */
static void
vbo_reset_all_attr(struct vbo_exec_context *exec)
{
   while (exec->vtx.enabled) {
      const int i = u_bit_scan64(&exec->vtx.enabled);
      vbo_reset_attr(exec, i);
   }

   exec->vtx.vertex_size = 0;
}
/** GLES1 entrypoint: forward to the exec implementation. */
void GLAPIENTRY
_es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
{
   vbo_exec_Color4f(r, g, b, a);
}
/** GLES1 entrypoint: forward to the exec implementation. */
void GLAPIENTRY
_es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
{
   vbo_exec_Normal3f(x, y, z);
}
/** GLES1 entrypoint: forward to the exec implementation. */
void GLAPIENTRY
_es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
{
   vbo_exec_MultiTexCoord4f(target, s, t, r, q);
}
/** GLES1 entrypoint: forward to the exec implementation. */
void GLAPIENTRY
_es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   vbo_exec_Materialfv(face, pname, params);
}
/**
 * GLES1 scalar glMaterialf: promote the scalar to a 4-component vector
 * (remaining components zero) and forward to the vector implementation.
 */
void GLAPIENTRY
_es_Materialf(GLenum face, GLenum pname, GLfloat param)
{
   GLfloat p[4];
   p[0] = param;
   p[1] = p[2] = p[3] = 0.0F;
   vbo_exec_Materialfv(face, pname, p);
}
/**
 * A special version of glVertexAttrib4f that does not treat index 0 as
 * positional (i.e. does not alias glVertex).
 */
static void
VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
{
   GET_CURRENT_CONTEXT(ctx);
   if (index < MAX_VERTEX_GENERIC_ATTRIBS)
      ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
   else
      ERROR(GL_INVALID_VALUE);
}
/** GLES entrypoint: generic attribute, index 0 not treated as position. */
void GLAPIENTRY
_es_VertexAttrib4f(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
{
   VertexAttrib4f_nopos(index, x, y, z, w);
}
/** GLES entrypoint: 1-component attrib, defaults (y,z,w) = (0,0,1). */
void GLAPIENTRY
_es_VertexAttrib1f(GLuint indx, GLfloat x)
{
   VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
}
/** GLES entrypoint: vector form of _es_VertexAttrib1f. */
void GLAPIENTRY
_es_VertexAttrib1fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
}
/** GLES entrypoint: 2-component attrib, defaults (z,w) = (0,1). */
void GLAPIENTRY
_es_VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y)
{
   VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
}
/** GLES entrypoint: vector form of _es_VertexAttrib2f. */
void GLAPIENTRY
_es_VertexAttrib2fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
}
/** GLES entrypoint: 3-component attrib, default w = 1. */
void GLAPIENTRY
_es_VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
{
   VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
}
/** GLES entrypoint: vector form of _es_VertexAttrib3f. */
void GLAPIENTRY
_es_VertexAttrib3fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
}
/** GLES entrypoint: vector form of _es_VertexAttrib4f. */
void GLAPIENTRY
_es_VertexAttrib4fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
}