/**************************************************************************

Copyright 2002-2008 VMware, Inc.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
on the rights to use, copy, modify, merge, publish, distribute, sub
license, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
#include "main/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/macros.h"
#include "main/vtxfmt.h"
#include "main/dlist.h"
#include "main/eval.h"
#include "main/state.h"
#include "main/light.h"
#include "main/api_arrayelt.h"
#include "main/api_validate.h"
#include "main/dispatch.h"

#include "vbo_context.h"
#include "vbo_noop.h"

/** ID/name for immediate-mode VBO */
#define IMM_BUFFER_NAME 0xaabbccdd


static void reset_attrfv( struct vbo_exec_context *exec );

/**
 * Close off the last primitive, execute the buffer, restart the
 * primitive.  This is called when we fill a vertex buffer before
 * hitting glEnd.
 */
static void vbo_exec_wrap_buffers( struct vbo_exec_context *exec )
{
   if (exec->vtx.prim_count == 0) {
      exec->vtx.copied.nr = 0;
      exec->vtx.vert_count = 0;
      exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   }
   else {
      struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
      const GLuint last_begin = last_prim->begin;
      GLuint last_count;

      if (_mesa_inside_begin_end(exec->ctx)) {
         last_prim->count = exec->vtx.vert_count - last_prim->start;
      }

      last_count = last_prim->count;

      /* Execute the buffer and save copied vertices.
       */
      if (exec->vtx.vert_count)
         vbo_exec_vtx_flush( exec, GL_FALSE );
      else {
         exec->vtx.prim_count = 0;
         exec->vtx.copied.nr = 0;
      }

      /* Emit a glBegin to start the new list.
       */
      assert(exec->vtx.prim_count == 0);

      if (_mesa_inside_begin_end(exec->ctx)) {
         exec->vtx.prim[0].mode = exec->ctx->Driver.CurrentExecPrimitive;
         exec->vtx.prim[0].start = 0;
         exec->vtx.prim[0].count = 0;
         exec->vtx.prim_count++;

         if (exec->vtx.copied.nr == last_count)
            exec->vtx.prim[0].begin = last_begin;
      }
   }
}

/**
 * Deal with buffer wrapping where provoked by the vertex buffer
 * filling up, as opposed to upgrade_vertex().
 */
static void
vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
{
   fi_type *data = exec->vtx.copied.buffer;
   GLuint i;

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers( exec );

   if (!exec->vtx.buffer_ptr) {
      /* probably ran out of memory earlier when allocating the VBO */
      return;
   }

   /* Copy stored vertices to start of new list.
    */
   assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);

   for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
      memcpy( exec->vtx.buffer_ptr, data,
              exec->vtx.vertex_size * sizeof(GLfloat));
      exec->vtx.buffer_ptr += exec->vtx.vertex_size;
      data += exec->vtx.vertex_size;
      exec->vtx.vert_count++;
   }

   exec->vtx.copied.nr = 0;
}
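
/*
 * Informal example: a long GL_TRIANGLE_STRIP that overflows the vertex
 * buffer is handled by the two functions above: the buffered vertices are
 * drawn, the last few vertices of the strip are saved in exec->vtx.copied,
 * and they are replayed at the start of the next buffer so the strip
 * continues without a visible seam.
 */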

/**
 * Copy the active vertex's values to the ctx->Current fields.
 */
static void vbo_exec_copy_to_current( struct vbo_exec_context *exec )
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   GLuint i;

   for (i = VBO_ATTRIB_POS+1 ; i < VBO_ATTRIB_MAX ; i++) {
      if (exec->vtx.attrsz[i]) {
         /* Note: the exec->vtx.current[i] pointers point into the
          * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
          */
         GLfloat *current = (GLfloat *)vbo->currval[i].Ptr;
         fi_type tmp[8]; /* space for doubles */
         int dmul = exec->vtx.attrtype[i] == GL_DOUBLE ? 2 : 1;

         if (exec->vtx.attrtype[i] == GL_DOUBLE) {
            memset(tmp, 0, sizeof(tmp));
            memcpy(tmp, exec->vtx.attrptr[i],
                   exec->vtx.attrsz[i] * sizeof(GLfloat));
         }
         else {
            COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
                                        exec->vtx.attrsz[i],
                                        exec->vtx.attrptr[i],
                                        exec->vtx.attrtype[i]);
         }

         if (exec->vtx.attrtype[i] != vbo->currval[i].Type ||
             memcmp(current, tmp, 4 * sizeof(GLfloat) * dmul) != 0) {
            memcpy(current, tmp, 4 * sizeof(GLfloat) * dmul);

            /* Given that we explicitly state size here, there is no need
             * for the COPY_CLEAN above, could just copy 16 bytes and be
             * done.  The only problem is when Mesa accesses ctx->Current
             * directly.
             */
            /* Size here is in components - not bytes */
            vbo->currval[i].Size = exec->vtx.attrsz[i] / dmul;
            vbo->currval[i]._ElementSize =
               vbo->currval[i].Size * sizeof(GLfloat) * dmul;
            vbo->currval[i].Type = exec->vtx.attrtype[i];
            vbo->currval[i].Integer =
               vbo_attrtype_to_integer_flag(exec->vtx.attrtype[i]);
            vbo->currval[i].Doubles =
               vbo_attrtype_to_double_flag(exec->vtx.attrtype[i]);

            /* This triggers rather too much recalculation of Mesa state
             * that doesn't get used (eg light positions).
             */
            if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT &&
                i <= VBO_ATTRIB_MAT_BACK_INDEXES)
               ctx->NewState |= _NEW_LIGHT;

            ctx->NewState |= _NEW_CURRENT_ATTRIB;
         }
      }
   }

   /* Colormaterial -- this kind of sucks.
    */
   if (ctx->Light.ColorMaterialEnabled &&
       exec->vtx.attrsz[VBO_ATTRIB_COLOR0]) {
      _mesa_update_color_material(ctx,
                                  ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
   }
}
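
/*
 * Informal example: after a call such as
 *    glColor3f(1.0f, 0.0f, 0.0f);
 * the value first lives only in the exec vertex store; the copy-to-current
 * step above is what later makes it visible through ctx->Current and
 * glGet(GL_CURRENT_COLOR) when the buffered vertices are flushed.
 */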

/**
 * Copy current vertex attribute values into the current vertex.
 */
static void
vbo_exec_copy_from_current(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   GLint i;

   for (i = VBO_ATTRIB_POS + 1; i < VBO_ATTRIB_MAX; i++) {
      if (exec->vtx.attrtype[i] == GL_DOUBLE) {
         memcpy(exec->vtx.attrptr[i], vbo->currval[i].Ptr,
                exec->vtx.attrsz[i] * sizeof(GLfloat));
      }
      else {
         const fi_type *current = (fi_type *) vbo->currval[i].Ptr;
         /* The cases below intentionally fall through so that every
          * component used by this attribute gets copied.
          */
         switch (exec->vtx.attrsz[i]) {
         case 4: exec->vtx.attrptr[i][3] = current[3];
         case 3: exec->vtx.attrptr[i][2] = current[2];
         case 2: exec->vtx.attrptr[i][1] = current[1];
         case 1: exec->vtx.attrptr[i][0] = current[0];
            break;
         }
      }
   }
}

/**
 * Flush existing data, set new attrib size, replay copied vertices.
 * This is called when we transition from a small vertex attribute size
 * to a larger one.  Ex: glTexCoord2f -> glTexCoord4f.
 * We need to go back over the previous 2-component texcoords and insert
 * zero and one values.
 */
static void
vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
                             GLuint attr, GLuint newSize )
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   const GLint lastcount = exec->vtx.vert_count;
   fi_type *old_attrptr[VBO_ATTRIB_MAX];
   const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
   const GLuint oldSize = exec->vtx.attrsz[attr];
   GLuint i;

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers( exec );

   if (unlikely(exec->vtx.copied.nr)) {
      /* We're in the middle of a primitive, keep the old vertex
       * format around to be able to translate the copied vertices to
       * the new format.
       */
      memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
   }

   if (unlikely(oldSize)) {
      /* Do a COPY_TO_CURRENT to ensure back-copying works for the
       * case when the attribute already exists in the vertex and is
       * having its size increased.
       */
      vbo_exec_copy_to_current( exec );
   }

   /* Heuristic: Attempt to isolate attributes received outside
    * begin/end so that they don't bloat the vertices.
    */
   if (!_mesa_inside_begin_end(ctx) &&
       !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
      vbo_exec_copy_to_current( exec );
      reset_attrfv( exec );
   }

   /* Fix up sizes:
    */
   exec->vtx.attrsz[attr] = newSize;
   exec->vtx.vertex_size += newSize - oldSize;
   exec->vtx.max_vert = ((VBO_VERT_BUFFER_SIZE - exec->vtx.buffer_used) /
                         (exec->vtx.vertex_size * sizeof(GLfloat)));
   exec->vtx.vert_count = 0;
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;

   if (unlikely(oldSize)) {
      /* Size changed, recalculate all the attrptr[] values
       */
      fi_type *tmp = exec->vtx.vertex;

      for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) {
         if (exec->vtx.attrsz[i]) {
            exec->vtx.attrptr[i] = tmp;
            tmp += exec->vtx.attrsz[i];
         }
         else
            exec->vtx.attrptr[i] = NULL; /* will not be dereferenced */
      }

      /* Copy from current to repopulate the vertex with correct
       * values.
       */
      vbo_exec_copy_from_current( exec );
   }
   else {
      /* Just have to append the new attribute at the end */
      exec->vtx.attrptr[attr] = exec->vtx.vertex +
         exec->vtx.vertex_size - newSize;
   }

   /* Replay stored vertices to translate them
    * to new format here.
    *
    * -- No need to replay - just copy piecewise
    */
   if (unlikely(exec->vtx.copied.nr)) {
      fi_type *data = exec->vtx.copied.buffer;
      fi_type *dest = exec->vtx.buffer_ptr;
      GLuint j;

      assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);

      for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
         for (j = 0 ; j < VBO_ATTRIB_MAX ; j++) {
            GLuint sz = exec->vtx.attrsz[j];

            if (sz) {
               GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
               GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;

               if (j == attr) {
                  if (oldSize) {
                     fi_type tmp[4];
                     COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
                                                 data + old_offset,
                                                 exec->vtx.attrtype[j]);
                     COPY_SZ_4V(dest + new_offset, newSize, tmp);
                  }
                  else {
                     fi_type *current = (fi_type *)vbo->currval[j].Ptr;
                     COPY_SZ_4V(dest + new_offset, sz, current);
                  }
               }
               else {
                  COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
               }
            }
         }

         data += old_vtx_size;
         dest += exec->vtx.vertex_size;
      }

      exec->vtx.buffer_ptr = dest;
      exec->vtx.vert_count += exec->vtx.copied.nr;
      exec->vtx.copied.nr = 0;
   }
}

/**
 * This is when a vertex attribute transitions to a different size.
 * For example, we saw a bunch of glTexCoord2f() calls and now we got a
 * glTexCoord4f() call.  We promote the array from size=2 to size=4.
 * \param newSize  size of new vertex (number of 32-bit words).
 */
static void
vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
                      GLuint newSize, GLenum newType)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   if (newSize > exec->vtx.attrsz[attr] ||
       newType != exec->vtx.attrtype[attr]) {
      /* New size is larger.  Need to flush existing vertices and get
       * an enlarged vertex format.
       */
      vbo_exec_wrap_upgrade_vertex( exec, attr, newSize );
   }
   else if (newSize < exec->vtx.active_sz[attr]) {
      GLuint i;
      const fi_type *id =
         vbo_get_default_vals_as_union(exec->vtx.attrtype[attr]);

      /* New size is smaller - just need to fill in some
       * zeros.  Don't need to flush or wrap.
       */
      for (i = newSize; i <= exec->vtx.attrsz[attr]; i++)
         exec->vtx.attrptr[attr][i-1] = id[i-1];
   }

   exec->vtx.active_sz[attr] = newSize;

   /* Does setting NeedFlush belong here?  Necessitates resetting
    * vtxfmt on each flush (otherwise flags won't get reset
    * afterwards).
    */
   if (attr == 0)
      ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
}
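
/*
 * Informal example of the "newSize < active_sz" path above:
 *    glTexCoord4f(s, t, r, q);
 *    glTexCoord2f(s, t);
 * The texcoord attribute stays 4 floats wide in the vertex; the now
 * unspecified r/q components are refilled from the type's default values
 * (effectively 0 and 1 for floats), so no flush or wrap is needed.
 */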

/**
 * Called upon first glVertex, glColor, glTexCoord, etc.
 */
static void
vbo_exec_begin_vertices(struct gl_context *ctx)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   vbo_exec_vtx_map( exec );

   assert((ctx->Driver.NeedFlush & FLUSH_UPDATE_CURRENT) == 0);
   assert(exec->begin_vertices_flags);

   ctx->Driver.NeedFlush |= exec->begin_vertices_flags;
}

/**
 * This macro is used to implement all the glVertex, glColor, glTexCoord,
 * glVertexAttrib, etc functions.
 * \param A  attribute index
 * \param N  attribute size (1..4)
 * \param T  type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
 * \param C  cast type (fi_type or double)
 * \param V0, V1, V2, V3  attribute value
 */
#define ATTR_UNION( A, N, T, C, V0, V1, V2, V3 )                           \
do {                                                                       \
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;                \
   int sz = (sizeof(C) / sizeof(GLfloat));                                 \
                                                                           \
   assert(sz == 1 || sz == 2);                                             \
                                                                           \
   /* check if attribute size or type is changing */                       \
   if (unlikely(exec->vtx.active_sz[A] != N * sz) ||                       \
       unlikely(exec->vtx.attrtype[A] != T)) {                             \
      vbo_exec_fixup_vertex(ctx, A, N * sz, T);                            \
   }                                                                       \
                                                                           \
   /* store vertex attribute in vertex buffer */                           \
   {                                                                       \
      C *dest = (C *)exec->vtx.attrptr[A];                                 \
      if (N>0) dest[0] = V0;                                               \
      if (N>1) dest[1] = V1;                                               \
      if (N>2) dest[2] = V2;                                               \
      if (N>3) dest[3] = V3;                                               \
      exec->vtx.attrtype[A] = T;                                           \
   }                                                                       \
                                                                           \
   if ((A) == 0) {                                                         \
      /* This is a glVertex call */                                        \
      GLuint i;                                                            \
                                                                           \
      if (unlikely((ctx->Driver.NeedFlush & FLUSH_UPDATE_CURRENT) == 0)) { \
         vbo_exec_begin_vertices(ctx);                                     \
      }                                                                    \
                                                                           \
      if (unlikely(!exec->vtx.buffer_ptr)) {                               \
         vbo_exec_vtx_map(exec);                                           \
      }                                                                    \
      assert(exec->vtx.buffer_ptr);                                        \
                                                                           \
      /* copy 32-bit words */                                              \
      for (i = 0; i < exec->vtx.vertex_size; i++)                          \
         exec->vtx.buffer_ptr[i] = exec->vtx.vertex[i];                    \
                                                                           \
      exec->vtx.buffer_ptr += exec->vtx.vertex_size;                       \
                                                                           \
      /* Set FLUSH_STORED_VERTICES to indicate that there's now */         \
      /* something to draw (not just updating a color or texcoord).*/      \
      ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;                      \
                                                                           \
      if (++exec->vtx.vert_count >= exec->vtx.max_vert)                    \
         vbo_exec_vtx_wrap( exec );                                        \
   }                                                                       \
   else {                                                                  \
      /* we now have accumulated per-vertex attributes */                  \
      ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT;                       \
   }                                                                       \
} while (0)

#define ERROR(err) _mesa_error( ctx, err, __func__ )
#define TAG(x) vbo_##x

#include "vbo_attrib_tmp.h"
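
/*
 * Reader's sketch (not part of the template above): a call such as
 * glColor3f(r, g, b) lands in the generated vbo_Color3f() wrapper, which
 * roughly expands to
 *    ATTR_UNION(VBO_ATTRIB_COLOR0, 3, GL_FLOAT, fi_type,
 *               FLOAT_AS_UNION(r), FLOAT_AS_UNION(g),
 *               FLOAT_AS_UNION(b), FLOAT_AS_UNION(1.0f));
 * i.e. the value is written into exec->vtx.attrptr[VBO_ATTRIB_COLOR0], and
 * only glVertex-family calls (attribute 0) copy the whole vertex into the
 * vertex buffer.
 */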

/**
 * Execute a glMaterial call.  Note that if GL_COLOR_MATERIAL is enabled,
 * this may be a (partial) no-op.
 */
static void GLAPIENTRY
vbo_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   GLbitfield updateMats;
   GET_CURRENT_CONTEXT(ctx);

   /* This function should be a no-op when it tries to update material
    * attributes which are currently tracking glColor via glColorMaterial.
    * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
    * indicating which material attributes can actually be updated below.
    */
   if (ctx->Light.ColorMaterialEnabled) {
      updateMats = ~ctx->Light._ColorMaterialBitmask;
   }
   else {
      /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
      updateMats = ALL_MATERIAL_BITS;
   }

   if (ctx->API == API_OPENGL_COMPAT && face == GL_FRONT) {
      updateMats &= FRONT_MATERIAL_BITS;
   }
   else if (ctx->API == API_OPENGL_COMPAT && face == GL_BACK) {
      updateMats &= BACK_MATERIAL_BITS;
   }
   else if (face != GL_FRONT_AND_BACK) {
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
      return;
   }

   switch (pname) {
   case GL_EMISSION:
      if (updateMats & MAT_BIT_FRONT_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
      if (updateMats & MAT_BIT_BACK_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
      break;
   case GL_AMBIENT:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      break;
   case GL_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   case GL_SPECULAR:
      if (updateMats & MAT_BIT_FRONT_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
      if (updateMats & MAT_BIT_BACK_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
      break;
   case GL_SHININESS:
      if (*params < 0 || *params > ctx->Const.MaxShininess) {
         _mesa_error(ctx, GL_INVALID_VALUE,
                     "glMaterial(invalid shininess: %f out of range [0, %f])",
                     *params, ctx->Const.MaxShininess);
         return;
      }
      if (updateMats & MAT_BIT_FRONT_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
      if (updateMats & MAT_BIT_BACK_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
      break;
   case GL_COLOR_INDEXES:
      if (ctx->API != API_OPENGL_COMPAT) {
         _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
         return;
      }
      if (updateMats & MAT_BIT_FRONT_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
      if (updateMats & MAT_BIT_BACK_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
      break;
   case GL_AMBIENT_AND_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   default:
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
      return;
   }
}

/**
 * Flush (draw) vertices.
 * \param  unmap - leave VBO unmapped after flushing?
 */
static void
vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, GLboolean unmap)
{
   if (exec->vtx.vert_count || unmap) {
      vbo_exec_vtx_flush( exec, unmap );
   }

   if (exec->vtx.vertex_size) {
      vbo_exec_copy_to_current( exec );
      reset_attrfv( exec );
   }
}

static void GLAPIENTRY vbo_exec_EvalCoord1f( GLfloat u )
{
   GET_CURRENT_CONTEXT( ctx );
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update( exec );

      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map1[i].map)
            if (exec->vtx.active_sz[i] != exec->eval.map1[i].sz)
               vbo_exec_fixup_vertex( ctx, i, exec->eval.map1[i].sz, GL_FLOAT );
      }
   }

   memcpy( exec->vtx.copied.buffer, exec->vtx.vertex,
           exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord1f( exec, u );

   memcpy( exec->vtx.vertex, exec->vtx.copied.buffer,
           exec->vtx.vertex_size * sizeof(GLfloat));
}

static void GLAPIENTRY vbo_exec_EvalCoord2f( GLfloat u, GLfloat v )
{
   GET_CURRENT_CONTEXT( ctx );
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update( exec );

      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map2[i].map)
            if (exec->vtx.active_sz[i] != exec->eval.map2[i].sz)
               vbo_exec_fixup_vertex( ctx, i, exec->eval.map2[i].sz, GL_FLOAT );
      }

      if (ctx->Eval.AutoNormal)
         if (exec->vtx.active_sz[VBO_ATTRIB_NORMAL] != 3)
            vbo_exec_fixup_vertex( ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT );
   }

   memcpy( exec->vtx.copied.buffer, exec->vtx.vertex,
           exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord2f( exec, u, v );

   memcpy( exec->vtx.vertex, exec->vtx.copied.buffer,
           exec->vtx.vertex_size * sizeof(GLfloat));
}

static void GLAPIENTRY vbo_exec_EvalCoord1fv( const GLfloat *u )
{
   vbo_exec_EvalCoord1f( u[0] );
}

static void GLAPIENTRY vbo_exec_EvalCoord2fv( const GLfloat *u )
{
   vbo_exec_EvalCoord2f( u[0], u[1] );
}

static void GLAPIENTRY vbo_exec_EvalPoint1( GLint i )
{
   GET_CURRENT_CONTEXT( ctx );
   GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
                 (GLfloat) ctx->Eval.MapGrid1un);
   GLfloat u = i * du + ctx->Eval.MapGrid1u1;

   vbo_exec_EvalCoord1f( u );
}


static void GLAPIENTRY vbo_exec_EvalPoint2( GLint i, GLint j )
{
   GET_CURRENT_CONTEXT( ctx );
   GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
                 (GLfloat) ctx->Eval.MapGrid2un);
   GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
                 (GLfloat) ctx->Eval.MapGrid2vn);
   GLfloat u = i * du + ctx->Eval.MapGrid2u1;
   GLfloat v = j * dv + ctx->Eval.MapGrid2v1;

   vbo_exec_EvalCoord2f( u, v );
}

/**
 * Called via glBegin.
 */
static void GLAPIENTRY vbo_exec_Begin( GLenum mode )
{
   GET_CURRENT_CONTEXT( ctx );
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
   int i;

   if (_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
      return;
   }

   if (!_mesa_valid_prim_mode(ctx, mode, "glBegin")) {
      return;
   }

   vbo_draw_method(vbo_context(ctx), DRAW_BEGIN_END);

   if (ctx->NewState) {
      _mesa_update_state( ctx );

      CALL_Begin(ctx->Exec, (mode));
      return;
   }

   if (!_mesa_valid_to_render(ctx, "glBegin")) {
      return;
   }

   /* Heuristic: attempt to isolate attributes occurring outside
    * begin/end pairs.
    */
   if (exec->vtx.vertex_size && !exec->vtx.attrsz[0])
      vbo_exec_FlushVertices_internal(exec, GL_FALSE);

   i = exec->vtx.prim_count++;
   exec->vtx.prim[i].mode = mode;
   exec->vtx.prim[i].begin = 1;
   exec->vtx.prim[i].end = 0;
   exec->vtx.prim[i].indexed = 0;
   exec->vtx.prim[i].weak = 0;
   exec->vtx.prim[i].pad = 0;
   exec->vtx.prim[i].start = exec->vtx.vert_count;
   exec->vtx.prim[i].count = 0;
   exec->vtx.prim[i].num_instances = 1;
   exec->vtx.prim[i].base_instance = 0;
   exec->vtx.prim[i].is_indirect = 0;

   ctx->Driver.CurrentExecPrimitive = mode;

   ctx->Exec = ctx->BeginEnd;
   /* We may have been called from a display list, in which case we should
    * leave dlist.c's dispatch table in place.
    */
   if (ctx->CurrentDispatch == ctx->OutsideBeginEnd) {
      ctx->CurrentDispatch = ctx->BeginEnd;
      _glapi_set_dispatch(ctx->CurrentDispatch);
   }
   else {
      assert(ctx->CurrentDispatch == ctx->Save);
   }
}

/**
 * Try to merge / concatenate the two most recent VBO primitives.
 */
static void
try_vbo_merge(struct vbo_exec_context *exec)
{
   struct _mesa_prim *cur = &exec->vtx.prim[exec->vtx.prim_count - 1];

   assert(exec->vtx.prim_count >= 1);

   vbo_try_prim_conversion(cur);

   if (exec->vtx.prim_count >= 2) {
      struct _mesa_prim *prev = &exec->vtx.prim[exec->vtx.prim_count - 2];
      assert(prev == cur - 1);

      if (vbo_can_merge_prims(prev, cur)) {
         assert(cur->begin);
         assert(cur->end);
         assert(prev->begin);
         assert(prev->end);
         vbo_merge_prims(prev, cur);
         exec->vtx.prim_count--;  /* drop the last primitive */
      }
   }
}
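
/*
 * Informal note: this is what allows back-to-back primitives such as
 *    glBegin(GL_QUADS); ... glEnd();
 *    glBegin(GL_QUADS); ... glEnd();
 * to be drawn as a single primitive when vbo_can_merge_prims() reports the
 * two entries as compatible, saving a draw call.
 */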

/**
 * Called via glEnd.
 */
static void GLAPIENTRY vbo_exec_End( void )
{
   GET_CURRENT_CONTEXT( ctx );
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   if (!_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
      return;
   }

   ctx->Exec = ctx->OutsideBeginEnd;
   if (ctx->CurrentDispatch == ctx->BeginEnd) {
      ctx->CurrentDispatch = ctx->OutsideBeginEnd;
      _glapi_set_dispatch(ctx->CurrentDispatch);
   }

   if (exec->vtx.prim_count > 0) {
      /* close off current primitive */
      int idx = exec->vtx.vert_count;
      int i = exec->vtx.prim_count - 1;

      exec->vtx.prim[i].end = 1;
      exec->vtx.prim[i].count = idx - exec->vtx.prim[i].start;

      try_vbo_merge(exec);
   }

   ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;

   if (exec->vtx.prim_count == VBO_MAX_PRIM)
      vbo_exec_vtx_flush( exec, GL_FALSE );

   if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
      _mesa_flush(ctx);
   }
}

/**
 * Called via glPrimitiveRestartNV()
 */
static void GLAPIENTRY
vbo_exec_PrimitiveRestartNV(void)
{
   GLenum curPrim;
   GET_CURRENT_CONTEXT( ctx );

   curPrim = ctx->Driver.CurrentExecPrimitive;

   if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
      _mesa_error( ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV" );
   }
   else {
      vbo_exec_End();
      vbo_exec_Begin(curPrim);
   }
}

static void vbo_exec_vtxfmt_init( struct vbo_exec_context *exec )
{
   struct gl_context *ctx = exec->ctx;
   GLvertexformat *vfmt = &exec->vtxfmt;

   vfmt->ArrayElement = _ae_ArrayElement;

   vfmt->Begin = vbo_exec_Begin;
   vfmt->End = vbo_exec_End;
   vfmt->PrimitiveRestartNV = vbo_exec_PrimitiveRestartNV;

   vfmt->CallList = _mesa_CallList;
   vfmt->CallLists = _mesa_CallLists;

   vfmt->EvalCoord1f = vbo_exec_EvalCoord1f;
   vfmt->EvalCoord1fv = vbo_exec_EvalCoord1fv;
   vfmt->EvalCoord2f = vbo_exec_EvalCoord2f;
   vfmt->EvalCoord2fv = vbo_exec_EvalCoord2fv;
   vfmt->EvalPoint1 = vbo_exec_EvalPoint1;
   vfmt->EvalPoint2 = vbo_exec_EvalPoint2;

   /* from attrib_tmp.h:
    */
   vfmt->Color3f = vbo_Color3f;
   vfmt->Color3fv = vbo_Color3fv;
   vfmt->Color4f = vbo_Color4f;
   vfmt->Color4fv = vbo_Color4fv;
   vfmt->FogCoordfEXT = vbo_FogCoordfEXT;
   vfmt->FogCoordfvEXT = vbo_FogCoordfvEXT;
   vfmt->MultiTexCoord1fARB = vbo_MultiTexCoord1f;
   vfmt->MultiTexCoord1fvARB = vbo_MultiTexCoord1fv;
   vfmt->MultiTexCoord2fARB = vbo_MultiTexCoord2f;
   vfmt->MultiTexCoord2fvARB = vbo_MultiTexCoord2fv;
   vfmt->MultiTexCoord3fARB = vbo_MultiTexCoord3f;
   vfmt->MultiTexCoord3fvARB = vbo_MultiTexCoord3fv;
   vfmt->MultiTexCoord4fARB = vbo_MultiTexCoord4f;
   vfmt->MultiTexCoord4fvARB = vbo_MultiTexCoord4fv;
   vfmt->Normal3f = vbo_Normal3f;
   vfmt->Normal3fv = vbo_Normal3fv;
   vfmt->SecondaryColor3fEXT = vbo_SecondaryColor3fEXT;
   vfmt->SecondaryColor3fvEXT = vbo_SecondaryColor3fvEXT;
   vfmt->TexCoord1f = vbo_TexCoord1f;
   vfmt->TexCoord1fv = vbo_TexCoord1fv;
   vfmt->TexCoord2f = vbo_TexCoord2f;
   vfmt->TexCoord2fv = vbo_TexCoord2fv;
   vfmt->TexCoord3f = vbo_TexCoord3f;
   vfmt->TexCoord3fv = vbo_TexCoord3fv;
   vfmt->TexCoord4f = vbo_TexCoord4f;
   vfmt->TexCoord4fv = vbo_TexCoord4fv;
   vfmt->Vertex2f = vbo_Vertex2f;
   vfmt->Vertex2fv = vbo_Vertex2fv;
   vfmt->Vertex3f = vbo_Vertex3f;
   vfmt->Vertex3fv = vbo_Vertex3fv;
   vfmt->Vertex4f = vbo_Vertex4f;
   vfmt->Vertex4fv = vbo_Vertex4fv;

   if (ctx->API == API_OPENGLES2) {
      vfmt->VertexAttrib1fARB = _es_VertexAttrib1f;
      vfmt->VertexAttrib1fvARB = _es_VertexAttrib1fv;
      vfmt->VertexAttrib2fARB = _es_VertexAttrib2f;
      vfmt->VertexAttrib2fvARB = _es_VertexAttrib2fv;
      vfmt->VertexAttrib3fARB = _es_VertexAttrib3f;
      vfmt->VertexAttrib3fvARB = _es_VertexAttrib3fv;
      vfmt->VertexAttrib4fARB = _es_VertexAttrib4f;
      vfmt->VertexAttrib4fvARB = _es_VertexAttrib4fv;
   }
   else {
      vfmt->VertexAttrib1fARB = vbo_VertexAttrib1fARB;
      vfmt->VertexAttrib1fvARB = vbo_VertexAttrib1fvARB;
      vfmt->VertexAttrib2fARB = vbo_VertexAttrib2fARB;
      vfmt->VertexAttrib2fvARB = vbo_VertexAttrib2fvARB;
      vfmt->VertexAttrib3fARB = vbo_VertexAttrib3fARB;
      vfmt->VertexAttrib3fvARB = vbo_VertexAttrib3fvARB;
      vfmt->VertexAttrib4fARB = vbo_VertexAttrib4fARB;
      vfmt->VertexAttrib4fvARB = vbo_VertexAttrib4fvARB;
   }

   /* Note that VertexAttrib4fNV is used from dlist.c and api_arrayelt.c so
    * they can have a single entrypoint for updating any of the legacy
    * attribs.
    */
   vfmt->VertexAttrib1fNV = vbo_VertexAttrib1fNV;
   vfmt->VertexAttrib1fvNV = vbo_VertexAttrib1fvNV;
   vfmt->VertexAttrib2fNV = vbo_VertexAttrib2fNV;
   vfmt->VertexAttrib2fvNV = vbo_VertexAttrib2fvNV;
   vfmt->VertexAttrib3fNV = vbo_VertexAttrib3fNV;
   vfmt->VertexAttrib3fvNV = vbo_VertexAttrib3fvNV;
   vfmt->VertexAttrib4fNV = vbo_VertexAttrib4fNV;
   vfmt->VertexAttrib4fvNV = vbo_VertexAttrib4fvNV;

   /* integer-valued */
   vfmt->VertexAttribI1i = vbo_VertexAttribI1i;
   vfmt->VertexAttribI2i = vbo_VertexAttribI2i;
   vfmt->VertexAttribI3i = vbo_VertexAttribI3i;
   vfmt->VertexAttribI4i = vbo_VertexAttribI4i;
   vfmt->VertexAttribI2iv = vbo_VertexAttribI2iv;
   vfmt->VertexAttribI3iv = vbo_VertexAttribI3iv;
   vfmt->VertexAttribI4iv = vbo_VertexAttribI4iv;

   /* unsigned integer-valued */
   vfmt->VertexAttribI1ui = vbo_VertexAttribI1ui;
   vfmt->VertexAttribI2ui = vbo_VertexAttribI2ui;
   vfmt->VertexAttribI3ui = vbo_VertexAttribI3ui;
   vfmt->VertexAttribI4ui = vbo_VertexAttribI4ui;
   vfmt->VertexAttribI2uiv = vbo_VertexAttribI2uiv;
   vfmt->VertexAttribI3uiv = vbo_VertexAttribI3uiv;
   vfmt->VertexAttribI4uiv = vbo_VertexAttribI4uiv;

   vfmt->Materialfv = vbo_Materialfv;

   vfmt->EdgeFlag = vbo_EdgeFlag;
   vfmt->Indexf = vbo_Indexf;
   vfmt->Indexfv = vbo_Indexfv;

   /* ARB_vertex_type_2_10_10_10_rev */
   vfmt->VertexP2ui = vbo_VertexP2ui;
   vfmt->VertexP2uiv = vbo_VertexP2uiv;
   vfmt->VertexP3ui = vbo_VertexP3ui;
   vfmt->VertexP3uiv = vbo_VertexP3uiv;
   vfmt->VertexP4ui = vbo_VertexP4ui;
   vfmt->VertexP4uiv = vbo_VertexP4uiv;

   vfmt->TexCoordP1ui = vbo_TexCoordP1ui;
   vfmt->TexCoordP1uiv = vbo_TexCoordP1uiv;
   vfmt->TexCoordP2ui = vbo_TexCoordP2ui;
   vfmt->TexCoordP2uiv = vbo_TexCoordP2uiv;
   vfmt->TexCoordP3ui = vbo_TexCoordP3ui;
   vfmt->TexCoordP3uiv = vbo_TexCoordP3uiv;
   vfmt->TexCoordP4ui = vbo_TexCoordP4ui;
   vfmt->TexCoordP4uiv = vbo_TexCoordP4uiv;

   vfmt->MultiTexCoordP1ui = vbo_MultiTexCoordP1ui;
   vfmt->MultiTexCoordP1uiv = vbo_MultiTexCoordP1uiv;
   vfmt->MultiTexCoordP2ui = vbo_MultiTexCoordP2ui;
   vfmt->MultiTexCoordP2uiv = vbo_MultiTexCoordP2uiv;
   vfmt->MultiTexCoordP3ui = vbo_MultiTexCoordP3ui;
   vfmt->MultiTexCoordP3uiv = vbo_MultiTexCoordP3uiv;
   vfmt->MultiTexCoordP4ui = vbo_MultiTexCoordP4ui;
   vfmt->MultiTexCoordP4uiv = vbo_MultiTexCoordP4uiv;

   vfmt->NormalP3ui = vbo_NormalP3ui;
   vfmt->NormalP3uiv = vbo_NormalP3uiv;

   vfmt->ColorP3ui = vbo_ColorP3ui;
   vfmt->ColorP3uiv = vbo_ColorP3uiv;
   vfmt->ColorP4ui = vbo_ColorP4ui;
   vfmt->ColorP4uiv = vbo_ColorP4uiv;

   vfmt->SecondaryColorP3ui = vbo_SecondaryColorP3ui;
   vfmt->SecondaryColorP3uiv = vbo_SecondaryColorP3uiv;

   vfmt->VertexAttribP1ui = vbo_VertexAttribP1ui;
   vfmt->VertexAttribP1uiv = vbo_VertexAttribP1uiv;
   vfmt->VertexAttribP2ui = vbo_VertexAttribP2ui;
   vfmt->VertexAttribP2uiv = vbo_VertexAttribP2uiv;
   vfmt->VertexAttribP3ui = vbo_VertexAttribP3ui;
   vfmt->VertexAttribP3uiv = vbo_VertexAttribP3uiv;
   vfmt->VertexAttribP4ui = vbo_VertexAttribP4ui;
   vfmt->VertexAttribP4uiv = vbo_VertexAttribP4uiv;

   vfmt->VertexAttribL1d = vbo_VertexAttribL1d;
   vfmt->VertexAttribL2d = vbo_VertexAttribL2d;
   vfmt->VertexAttribL3d = vbo_VertexAttribL3d;
   vfmt->VertexAttribL4d = vbo_VertexAttribL4d;

   vfmt->VertexAttribL1dv = vbo_VertexAttribL1dv;
   vfmt->VertexAttribL2dv = vbo_VertexAttribL2dv;
   vfmt->VertexAttribL3dv = vbo_VertexAttribL3dv;
   vfmt->VertexAttribL4dv = vbo_VertexAttribL4dv;
}

/**
 * Tell the VBO module to use a real OpenGL vertex buffer object to
 * store accumulated immediate-mode vertex data.
 * This replaces the malloced buffer which was created in
 * vbo_exec_vtx_init() below.
 */
void vbo_use_buffer_objects(struct gl_context *ctx)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
   /* Any buffer name but 0 can be used here since this bufferobj won't
    * go into the bufferobj hashtable.
    */
   GLuint bufName = IMM_BUFFER_NAME;
   GLenum target = GL_ARRAY_BUFFER_ARB;
   GLenum usage = GL_STREAM_DRAW_ARB;
   GLsizei size = VBO_VERT_BUFFER_SIZE;

   /* Make sure this func is only used once */
   assert(exec->vtx.bufferobj == ctx->Shared->NullBufferObj);

   _mesa_align_free(exec->vtx.buffer_map);
   exec->vtx.buffer_map = NULL;
   exec->vtx.buffer_ptr = NULL;

   /* Allocate a real buffer object now */
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
   exec->vtx.bufferobj = ctx->Driver.NewBufferObject(ctx, bufName);
   if (!ctx->Driver.BufferData(ctx, target, size, NULL, usage,
                               GL_MAP_WRITE_BIT |
                               GL_DYNAMIC_STORAGE_BIT |
                               GL_CLIENT_STORAGE_BIT,
                               exec->vtx.bufferobj)) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "VBO allocation");
   }
}

/**
 * If this function is called, all VBO buffers will be unmapped when
 * we flush.
 * Otherwise, if a simple command like glColor3f() is called and we flush,
 * the current VBO may be left mapped.
 */
void
vbo_always_unmap_buffers(struct gl_context *ctx)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
   exec->begin_vertices_flags |= FLUSH_STORED_VERTICES;
}

void vbo_exec_vtx_init( struct vbo_exec_context *exec )
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   GLuint i;

   /* Allocate a buffer object.  Will just reuse this object
    * continuously, unless vbo_use_buffer_objects() is called to enable
    * use of real VBOs.
    */
   _mesa_reference_buffer_object(ctx,
                                 &exec->vtx.bufferobj,
                                 ctx->Shared->NullBufferObj);

   assert(!exec->vtx.buffer_map);
   exec->vtx.buffer_map = _mesa_align_malloc(VBO_VERT_BUFFER_SIZE, 64);
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;

   vbo_exec_vtxfmt_init( exec );
   _mesa_noop_vtxfmt_init(&exec->vtxfmt_noop);

   for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) {
      assert(i < ARRAY_SIZE(exec->vtx.attrsz));
      exec->vtx.attrsz[i] = 0;
      assert(i < ARRAY_SIZE(exec->vtx.attrtype));
      exec->vtx.attrtype[i] = GL_FLOAT;
      assert(i < ARRAY_SIZE(exec->vtx.active_sz));
      exec->vtx.active_sz[i] = 0;
   }
   for (i = 0 ; i < VERT_ATTRIB_MAX ; i++) {
      assert(i < ARRAY_SIZE(exec->vtx.inputs));
      assert(i < ARRAY_SIZE(exec->vtx.arrays));
      exec->vtx.inputs[i] = &exec->vtx.arrays[i];
   }

   {
      struct gl_client_array *arrays = exec->vtx.arrays;
      unsigned i;

      memcpy(arrays, &vbo->currval[VBO_ATTRIB_POS],
             VERT_ATTRIB_FF_MAX * sizeof(arrays[0]));
      for (i = 0; i < VERT_ATTRIB_FF_MAX; ++i) {
         struct gl_client_array *array;
         array = &arrays[VERT_ATTRIB_FF(i)];
         array->BufferObj = NULL;
         _mesa_reference_buffer_object(ctx, &array->BufferObj,
                                       vbo->currval[VBO_ATTRIB_POS+i].BufferObj);
      }

      memcpy(arrays + VERT_ATTRIB_GENERIC(0),
             &vbo->currval[VBO_ATTRIB_GENERIC0],
             VERT_ATTRIB_GENERIC_MAX * sizeof(arrays[0]));

      for (i = 0; i < VERT_ATTRIB_GENERIC_MAX; ++i) {
         struct gl_client_array *array;
         array = &arrays[VERT_ATTRIB_GENERIC(i)];
         array->BufferObj = NULL;
         _mesa_reference_buffer_object(ctx, &array->BufferObj,
                                       vbo->currval[VBO_ATTRIB_GENERIC0+i].BufferObj);
      }
   }

   exec->vtx.vertex_size = 0;

   exec->begin_vertices_flags = FLUSH_UPDATE_CURRENT;
}

void vbo_exec_vtx_destroy( struct vbo_exec_context *exec )
{
   /* using a real VBO for vertex data */
   struct gl_context *ctx = exec->ctx;
   unsigned i;

   /* True VBOs should already be unmapped
    */
   if (exec->vtx.buffer_map) {
      assert(exec->vtx.bufferobj->Name == 0 ||
             exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
      if (exec->vtx.bufferobj->Name == 0) {
         _mesa_align_free(exec->vtx.buffer_map);
         exec->vtx.buffer_map = NULL;
         exec->vtx.buffer_ptr = NULL;
      }
   }

   /* Drop any outstanding reference to the vertex buffer
    */
   for (i = 0; i < ARRAY_SIZE(exec->vtx.arrays); i++) {
      _mesa_reference_buffer_object(ctx,
                                    &exec->vtx.arrays[i].BufferObj,
                                    NULL);
   }

   /* Free the vertex buffer.  Unmap first if needed.
    */
   if (_mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
      ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
   }
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
}

/**
 * If inside glBegin()/glEnd(), it should assert(0).  Otherwise, if
 * FLUSH_STORED_VERTICES bit in \p flags is set flushes any buffered
 * vertices, if FLUSH_UPDATE_CURRENT bit is set updates
 * __struct gl_contextRec::Current and gl_light_attrib::Material
 *
 * Note that the default T&L engine never clears the
 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
 *
 * \param flags  bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
 */
void vbo_exec_FlushVertices( struct gl_context *ctx, GLuint flags )
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

#ifdef DEBUG
   /* debug check: make sure we don't get called recursively */
   exec->flush_call_depth++;
   assert(exec->flush_call_depth == 1);
#endif

   if (_mesa_inside_begin_end(ctx)) {
      /* We've had glBegin but not glEnd! */
#ifdef DEBUG
      exec->flush_call_depth--;
      assert(exec->flush_call_depth == 0);
#endif
      return;
   }

   /* Flush (draw), and make sure VBO is left unmapped when done */
   vbo_exec_FlushVertices_internal(exec, GL_TRUE);

   /* Need to do this to ensure vbo_exec_begin_vertices gets called again:
    */
   ctx->Driver.NeedFlush &= ~(FLUSH_UPDATE_CURRENT | flags);

#ifdef DEBUG
   exec->flush_call_depth--;
   assert(exec->flush_call_depth == 0);
#endif
}
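
/*
 * Informal note: core Mesa normally reaches this function through the
 * FLUSH_VERTICES()/FLUSH_CURRENT() macros (typically via the
 * ctx->Driver.FlushVertices hook), which test ctx->Driver.NeedFlush first
 * so the call is skipped when nothing is buffered.
 */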

static void reset_attrfv( struct vbo_exec_context *exec )
{
   GLuint i;

   for (i = 0 ; i < VBO_ATTRIB_MAX ; i++) {
      exec->vtx.attrsz[i] = 0;
      exec->vtx.attrtype[i] = GL_FLOAT;
      exec->vtx.active_sz[i] = 0;
   }

   exec->vtx.vertex_size = 0;
}

void GLAPIENTRY
_es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
{
   vbo_Color4f(r, g, b, a);
}


void GLAPIENTRY
_es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
{
   vbo_Normal3f(x, y, z);
}


void GLAPIENTRY
_es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
{
   vbo_MultiTexCoord4f(target, s, t, r, q);
}


void GLAPIENTRY
_es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   vbo_Materialfv(face, pname, params);
}


void GLAPIENTRY
_es_Materialf(GLenum face, GLenum pname, GLfloat param)
{
   GLfloat p[4];
   p[0] = param;
   p[1] = p[2] = p[3] = 0.0F;
   vbo_Materialfv(face, pname, p);
}


/**
 * A special version of glVertexAttrib4f that does not treat index 0 as
 * the position attribute.
 */
static void
VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
{
   GET_CURRENT_CONTEXT(ctx);
   if (index < MAX_VERTEX_GENERIC_ATTRIBS)
      ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
   else
      ERROR(GL_INVALID_VALUE);
}

void GLAPIENTRY
_es_VertexAttrib4f(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
{
   VertexAttrib4f_nopos(index, x, y, z, w);
}

void GLAPIENTRY
_es_VertexAttrib1f(GLuint indx, GLfloat x)
{
   VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
}

void GLAPIENTRY
_es_VertexAttrib1fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
}

void GLAPIENTRY
_es_VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y)
{
   VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
}

void GLAPIENTRY
_es_VertexAttrib2fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
}

void GLAPIENTRY
_es_VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
{
   VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
}

void GLAPIENTRY
_es_VertexAttrib3fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
}

void GLAPIENTRY
_es_VertexAttrib4fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
}