vbo: remove redundant code in vbo_exec_fixup_vertex
[mesa.git] / src / mesa / vbo / vbo_exec_api.c
1 /**************************************************************************
2
3 Copyright 2002-2008 VMware, Inc.
4
5 All Rights Reserved.
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 on the rights to use, copy, modify, merge, publish, distribute, sub
11 license, and/or sell copies of the Software, and to permit persons to whom
12 the Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice (including the next
15 paragraph) shall be included in all copies or substantial portions of the
16 Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **************************************************************************/
27
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33 #include "main/glheader.h"
34 #include "main/bufferobj.h"
35 #include "main/context.h"
36 #include "main/macros.h"
37 #include "main/vtxfmt.h"
38 #include "main/dlist.h"
39 #include "main/eval.h"
40 #include "main/state.h"
41 #include "main/light.h"
42 #include "main/api_arrayelt.h"
43 #include "main/draw_validate.h"
44 #include "main/dispatch.h"
45 #include "util/bitscan.h"
46
47 #include "vbo_noop.h"
48 #include "vbo_private.h"
49
50
51 /** ID/name for immediate-mode VBO */
52 #define IMM_BUFFER_NAME 0xaabbccdd
53
54
55 static void
56 vbo_reset_all_attr(struct vbo_exec_context *exec);
57
58
/**
 * Close off the last primitive, execute the buffer, restart the
 * primitive.  This is called when we fill a vertex buffer before
 * hitting glEnd.
 */
static void
vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
{
   if (exec->vtx.prim_count == 0) {
      /* Nothing queued: just rewind the vertex buffer. */
      exec->vtx.copied.nr = 0;
      exec->vtx.vert_count = 0;
      exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   }
   else {
      struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
      const GLuint last_begin = last_prim->begin;
      GLuint last_count;

      if (_mesa_inside_begin_end(exec->ctx)) {
         /* The still-open primitive extends to the end of the buffer. */
         last_prim->count = exec->vtx.vert_count - last_prim->start;
      }

      last_count = last_prim->count;

      /* Special handling for wrapping GL_LINE_LOOP */
      if (last_prim->mode == GL_LINE_LOOP &&
          last_count > 0 &&
          !last_prim->end) {
         /* draw this section of the incomplete line loop as a line strip */
         last_prim->mode = GL_LINE_STRIP;
         if (!last_prim->begin) {
            /* This is not the first section of the line loop, so don't
             * draw the 0th vertex.  We're saving it until we draw the
             * very last section of the loop.
             */
            last_prim->start++;
            last_prim->count--;
         }
      }

      /* Execute the buffer and save copied vertices.
       */
      if (exec->vtx.vert_count)
         vbo_exec_vtx_flush(exec);
      else {
         /* No vertices at all: drop the queued prims instead of drawing. */
         exec->vtx.prim_count = 0;
         exec->vtx.copied.nr = 0;
      }

      /* Emit a glBegin to start the new list.
       */
      assert(exec->vtx.prim_count == 0);

      if (_mesa_inside_begin_end(exec->ctx)) {
         /* Re-open the current primitive at the start of the fresh buffer. */
         exec->vtx.prim[0].mode = exec->ctx->Driver.CurrentExecPrimitive;
         exec->vtx.prim[0].begin = 0;
         exec->vtx.prim[0].end = 0;
         exec->vtx.prim[0].start = 0;
         exec->vtx.prim[0].count = 0;
         exec->vtx.prim_count++;

         /* If every vertex of the primitive was carried over, the new
          * section inherits the original begin flag.
          */
         if (exec->vtx.copied.nr == last_count)
            exec->vtx.prim[0].begin = last_begin;
      }
   }
}
125
126
127 /**
128 * Deal with buffer wrapping where provoked by the vertex buffer
129 * filling up, as opposed to upgrade_vertex().
130 */
131 static void
132 vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
133 {
134 unsigned numComponents;
135
136 /* Run pipeline on current vertices, copy wrapped vertices
137 * to exec->vtx.copied.
138 */
139 vbo_exec_wrap_buffers(exec);
140
141 if (!exec->vtx.buffer_ptr) {
142 /* probably ran out of memory earlier when allocating the VBO */
143 return;
144 }
145
146 /* Copy stored stored vertices to start of new list.
147 */
148 assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);
149
150 numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
151 memcpy(exec->vtx.buffer_ptr,
152 exec->vtx.copied.buffer,
153 numComponents * sizeof(fi_type));
154 exec->vtx.buffer_ptr += numComponents;
155 exec->vtx.vert_count += exec->vtx.copied.nr;
156
157 exec->vtx.copied.nr = 0;
158 }
159
160
/**
 * Copy the active vertex's values to the ctx->Current fields.
 * Position (VBO_ATTRIB_POS) is deliberately skipped — current position
 * is never consumed through ctx->Current here.
 */
static void
vbo_exec_copy_to_current(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));

   while (enabled) {
      const int i = u_bit_scan64(&enabled);

      /* Note: the exec->vtx.current[i] pointers point into the
       * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
       */
      GLfloat *current = (GLfloat *)vbo->current[i].Ptr;
      fi_type tmp[8]; /* space for doubles */
      int dmul = 1;   /* 2 when each component occupies two 32-bit slots */

      if (exec->vtx.attr[i].type == GL_DOUBLE ||
          exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB)
         dmul = 2;

      assert(exec->vtx.attr[i].size);

      if (exec->vtx.attr[i].type == GL_DOUBLE ||
          exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB) {
         /* 64-bit components: raw copy, zero-fill the rest. */
         memset(tmp, 0, sizeof(tmp));
         memcpy(tmp, exec->vtx.attrptr[i], exec->vtx.attr[i].size * sizeof(GLfloat));
      } else {
         /* Expand to a clean 4-component value (missing w defaults). */
         COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
                                     exec->vtx.attr[i].size,
                                     exec->vtx.attrptr[i],
                                     exec->vtx.attr[i].type);
      }

      /* Only touch ctx state (and set _NEW_* flags) if the value or its
       * type actually changed.
       */
      if (exec->vtx.attr[i].type != vbo->current[i].Format.Type ||
          memcmp(current, tmp, 4 * sizeof(GLfloat) * dmul) != 0) {
         memcpy(current, tmp, 4 * sizeof(GLfloat) * dmul);

         /* Given that we explicitly state size here, there is no need
          * for the COPY_CLEAN above, could just copy 16 bytes and be
          * done.  The only problem is when Mesa accesses ctx->Current
          * directly.
          */
         /* Size here is in components - not bytes */
         vbo_set_vertex_format(&vbo->current[i].Format,
                               exec->vtx.attr[i].size / dmul,
                               exec->vtx.attr[i].type);

         /* This triggers rather too much recalculation of Mesa state
          * that doesn't get used (eg light positions).
          */
         if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT &&
             i <= VBO_ATTRIB_MAT_BACK_INDEXES)
            ctx->NewState |= _NEW_LIGHT;

         ctx->NewState |= _NEW_CURRENT_ATTRIB;
      }
   }

   /* Colormaterial -- this kindof sucks.
    */
   if (ctx->Light.ColorMaterialEnabled &&
       exec->vtx.attr[VBO_ATTRIB_COLOR0].size) {
      _mesa_update_color_material(ctx,
                                  ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
   }
}
231
232
/**
 * Flush existing data, set new attrib size, replay copied vertices.
 * This is called when we transition from a small vertex attribute size
 * to a larger one.  Ex: glTexCoord2f -> glTexCoord4f.
 * We need to go back over the previous 2-component texcoords and insert
 * zero and one values.
 * \param attr  VBO_ATTRIB_x vertex attribute value
 * \param newSize  new attribute size in 32-bit components
 */
static void
vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
                             GLuint attr, GLuint newSize)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   const GLint lastcount = exec->vtx.vert_count;
   fi_type *old_attrptr[VBO_ATTRIB_MAX];
   const GLuint old_vtx_size_no_pos = exec->vtx.vertex_size_no_pos;
   const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
   const GLuint oldSize = exec->vtx.attr[attr].size;
   GLuint i;

   assert(attr < VBO_ATTRIB_MAX);

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (unlikely(exec->vtx.copied.nr)) {
      /* We're in the middle of a primitive, keep the old vertex
       * format around to be able to translate the copied vertices to
       * the new format.
       */
      memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
   }

   /* Heuristic: Attempt to isolate attributes received outside
    * begin/end so that they don't bloat the vertices.
    */
   if (!_mesa_inside_begin_end(ctx) &&
       !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
      vbo_exec_copy_to_current(exec);
      vbo_reset_all_attr(exec);
   }

   /* Fix up sizes:
    */
   exec->vtx.attr[attr].size = newSize;
   exec->vtx.vertex_size += newSize - oldSize;
   exec->vtx.vertex_size_no_pos = exec->vtx.vertex_size - exec->vtx.attr[0].size;
   exec->vtx.max_vert = vbo_compute_max_verts(exec);
   exec->vtx.vert_count = 0;
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   exec->vtx.enabled |= BITFIELD64_BIT(attr);

   if (attr != 0) {
      if (unlikely(oldSize)) {
         /* Resizing an attribute that already exists in the layout. */
         unsigned offset = exec->vtx.attrptr[attr] - exec->vtx.vertex;

         /* If there are attribs after the resized attrib... */
         if (offset + oldSize < old_vtx_size_no_pos) {
            int size_diff = newSize - oldSize;
            fi_type *old_first = exec->vtx.attrptr[attr] + oldSize;
            fi_type *new_first = exec->vtx.attrptr[attr] + newSize;
            fi_type *old_last = exec->vtx.vertex + old_vtx_size_no_pos - 1;
            fi_type *new_last = exec->vtx.vertex + exec->vtx.vertex_size_no_pos - 1;

            if (size_diff < 0) {
               /* Decreasing the size: Copy from first to last to move
                * elements to the left.
                */
               fi_type *old_end = old_last + 1;
               fi_type *old = old_first;
               fi_type *new = new_first;

               do {
                  *new++ = *old++;
               } while (old != old_end);
            } else {
               /* Increasing the size: Copy from last to first to move
                * elements to the right.
                */
               fi_type *old_end = old_first - 1;
               fi_type *old = old_last;
               fi_type *new = new_last;

               do {
                  *new-- = *old--;
               } while (old != old_end);
            }

            /* Update pointers to attribs, because we moved them. */
            GLbitfield64 enabled = exec->vtx.enabled &
                                   ~BITFIELD64_BIT(VBO_ATTRIB_POS) &
                                   ~BITFIELD64_BIT(attr);
            while (enabled) {
               unsigned i = u_bit_scan64(&enabled);

               if (exec->vtx.attrptr[i] > exec->vtx.attrptr[attr])
                  exec->vtx.attrptr[i] += size_diff;
            }
         }
      } else {
         /* Just have to append the new attribute at the end */
         exec->vtx.attrptr[attr] = exec->vtx.vertex +
            exec->vtx.vertex_size_no_pos - newSize;
      }
   }

   /* The position is always last. */
   exec->vtx.attrptr[0] = exec->vtx.vertex + exec->vtx.vertex_size_no_pos;

   /* Replay stored vertices to translate them
    * to new format here.
    *
    * -- No need to replay - just copy piecewise
    */
   if (unlikely(exec->vtx.copied.nr)) {
      fi_type *data = exec->vtx.copied.buffer;
      fi_type *dest = exec->vtx.buffer_ptr;

      assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);

      for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
         GLbitfield64 enabled = exec->vtx.enabled;
         while (enabled) {
            const int j = u_bit_scan64(&enabled);
            GLuint sz = exec->vtx.attr[j].size;
            GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
            GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;

            assert(sz);

            if (j == attr) {
               if (oldSize) {
                  /* Expand the old (smaller) value to the new size. */
                  fi_type tmp[4];
                  COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
                                              data + old_offset,
                                              exec->vtx.attr[j].type);
                  COPY_SZ_4V(dest + new_offset, newSize, tmp);
               } else {
                  /* Attribute was newly added: take the current value. */
                  fi_type *current = (fi_type *)vbo->current[j].Ptr;
                  COPY_SZ_4V(dest + new_offset, sz, current);
               }
            }
            else {
               /* Unchanged attribute: straight copy at its new offset. */
               COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
            }
         }

         data += old_vtx_size;
         dest += exec->vtx.vertex_size;
      }

      exec->vtx.buffer_ptr = dest;
      exec->vtx.vert_count += exec->vtx.copied.nr;
      exec->vtx.copied.nr = 0;
   }
}
392
393
/**
 * This is when a vertex attribute transitions to a different size.
 * For example, we saw a bunch of glTexCoord2f() calls and now we got a
 * glTexCoord4f() call.  We promote the array from size=2 to size=4.
 * \param newSize  size of new vertex (number of 32-bit words).
 * \param attr  VBO_ATTRIB_x vertex attribute value
 */
static void
vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
                      GLuint newSize, GLenum newType)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   assert(attr < VBO_ATTRIB_MAX);

   if (newSize > exec->vtx.attr[attr].size ||
       newType != exec->vtx.attr[attr].type) {
      /* New size is larger (or the component type changed).  Need to
       * flush existing vertices and get an enlarged vertex format.
       */
      vbo_exec_wrap_upgrade_vertex(exec, attr, newSize);
   }
   else if (newSize < exec->vtx.attr[attr].active_size) {
      GLuint i;
      const fi_type *id =
         vbo_get_default_vals_as_union(exec->vtx.attr[attr].type);

      /* New size is smaller - just need to fill in some
       * zeros.  Don't need to flush or wrap.
       */
      /* Fill components [newSize..size) with the type's defaults
       * (typically 0,0,0,1) so stale values don't leak through.
       */
      for (i = newSize; i <= exec->vtx.attr[attr].size; i++)
         exec->vtx.attrptr[attr][i-1] = id[i-1];
   }

   exec->vtx.attr[attr].active_size = newSize;
   exec->vtx.attr[attr].type = newType;
}
431
432
433 /**
434 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
435 * It depends on a few things, including whether we're inside or outside
436 * of glBegin/glEnd.
437 */
438 static inline bool
439 is_vertex_position(const struct gl_context *ctx, GLuint index)
440 {
441 return (index == 0 &&
442 _mesa_attr_zero_aliases_vertex(ctx) &&
443 _mesa_inside_begin_end(ctx));
444 }
445
/* Write a 64-bit value into a 32-bit pointer by preserving endianness.
 * Advances dst32 past the two words written.  The word order differs per
 * endianness so the 64-bit value is laid out identically in memory.
 */
#if UTIL_ARCH_LITTLE_ENDIAN
   #define SET_64BIT(dst32, u64) do { \
         *(dst32)++ = (u64); \
         *(dst32)++ = (uint64_t)(u64) >> 32; \
      } while (0)
#else
   #define SET_64BIT(dst32, u64) do { \
         *(dst32)++ = (uint64_t)(u64) >> 32; \
         *(dst32)++ = (u64); \
      } while (0)
#endif
458
459
/**
 * This macro is used to implement all the glVertex, glColor, glTexCoord,
 * glVertexAttrib, etc functions.
 * Non-position attributes are latched into exec->vtx.vertex; attribute 0
 * (glVertex) copies the latched values plus the position into the vertex
 * buffer, emitting one complete vertex.
 * \param A  VBO_ATTRIB_x attribute index
 * \param N  attribute size (1..4)
 * \param T  type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
 * \param C  cast type (uint32_t or uint64_t)
 * \param V0, V1, V2, V3  attribute value
 */
#define ATTR_UNION(A, N, T, C, V0, V1, V2, V3)                          \
do {                                                                    \
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;             \
   int sz = (sizeof(C) / sizeof(GLfloat));                              \
                                                                        \
   assert(sz == 1 || sz == 2);                                          \
                                                                        \
   /* check if attribute size or type is changing */                    \
   if (unlikely(exec->vtx.attr[A].active_size != N * sz ||              \
                exec->vtx.attr[A].type != T)) {                         \
      vbo_exec_fixup_vertex(ctx, A, N * sz, T);                         \
   }                                                                    \
                                                                        \
   /* store a copy of the attribute in exec except for glVertex */      \
   if ((A) != 0) {                                                      \
      C *dest = (C *)exec->vtx.attrptr[A];                              \
      if (N>0) dest[0] = V0;                                            \
      if (N>1) dest[1] = V1;                                            \
      if (N>2) dest[2] = V2;                                            \
      if (N>3) dest[3] = V3;                                            \
      assert(exec->vtx.attr[A].type == T);                              \
   }                                                                    \
                                                                        \
   if ((A) == 0) {                                                      \
      /* This is a glVertex call */                                     \
      uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr;                 \
      uint32_t *src = (uint32_t *)exec->vtx.vertex;                     \
      unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos;       \
                                                                        \
      /* Copy over attributes from exec. */                             \
      for (unsigned i = 0; i < vertex_size_no_pos; i++)                 \
         *dst++ = *src++;                                               \
                                                                        \
      /* Store the position, which is always last and can have 32 or */ \
      /* 64 bits per channel. */                                        \
      if (sizeof(C) == 4) {                                             \
         if (N > 0) *dst++ = V0;                                        \
         if (N > 1) *dst++ = V1;                                        \
         if (N > 2) *dst++ = V2;                                        \
         if (N > 3) *dst++ = V3;                                        \
      } else {                                                          \
         /* 64 bits: dst can be unaligned, so copy each 4-byte word */  \
         /* separately */                                               \
         if (N > 0) SET_64BIT(dst, V0);                                 \
         if (N > 1) SET_64BIT(dst, V1);                                 \
         if (N > 2) SET_64BIT(dst, V2);                                 \
         if (N > 3) SET_64BIT(dst, V3);                                 \
      }                                                                 \
                                                                        \
      /* dst now points at the beginning of the next vertex */          \
      exec->vtx.buffer_ptr = (fi_type*)dst;                             \
                                                                        \
      /* Don't set FLUSH_UPDATE_CURRENT because */                      \
      /* Current.Attrib[VBO_ATTRIB_POS] is never used. */               \
                                                                        \
      if (unlikely(++exec->vtx.vert_count >= exec->vtx.max_vert))       \
         vbo_exec_vtx_wrap(exec);                                       \
   } else {                                                             \
      /* we now have accumulated per-vertex attributes */               \
      ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT;                    \
   }                                                                    \
} while (0)
531
532
533 #undef ERROR
534 #define ERROR(err) _mesa_error(ctx, err, __func__)
535 #define TAG(x) vbo_exec_##x
536
537 #include "vbo_attrib_tmp.h"
538
539
540
/**
 * Execute a glMaterial call.  Note that if GL_COLOR_MATERIAL is enabled,
 * this may be a (partial) no-op.
 * Material changes are emitted as vertex attributes (MAT_ATTR) so they
 * can vary per-vertex inside glBegin/glEnd.
 */
static void GLAPIENTRY
vbo_exec_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   GLbitfield updateMats;
   GET_CURRENT_CONTEXT(ctx);

   /* This function should be a no-op when it tries to update material
    * attributes which are currently tracking glColor via glColorMaterial.
    * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
    * indicating which material attributes can actually be updated below.
    */
   if (ctx->Light.ColorMaterialEnabled) {
      updateMats = ~ctx->Light._ColorMaterialBitmask;
   }
   else {
      /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
      updateMats = ALL_MATERIAL_BITS;
   }

   if (ctx->API == API_OPENGL_COMPAT && face == GL_FRONT) {
      updateMats &= FRONT_MATERIAL_BITS;
   }
   else if (ctx->API == API_OPENGL_COMPAT && face == GL_BACK) {
      updateMats &= BACK_MATERIAL_BITS;
   }
   else if (face != GL_FRONT_AND_BACK) {
      /* Only compat profile accepts single-sided faces. */
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
      return;
   }

   switch (pname) {
   case GL_EMISSION:
      if (updateMats & MAT_BIT_FRONT_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
      if (updateMats & MAT_BIT_BACK_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
      break;
   case GL_AMBIENT:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      break;
   case GL_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   case GL_SPECULAR:
      if (updateMats & MAT_BIT_FRONT_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
      if (updateMats & MAT_BIT_BACK_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
      break;
   case GL_SHININESS:
      /* Shininess must lie in [0, MaxShininess] per the GL spec. */
      if (*params < 0 || *params > ctx->Const.MaxShininess) {
         _mesa_error(ctx, GL_INVALID_VALUE,
                     "glMaterial(invalid shininess: %f out range [0, %f])",
                     *params, ctx->Const.MaxShininess);
         return;
      }
      if (updateMats & MAT_BIT_FRONT_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
      if (updateMats & MAT_BIT_BACK_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
      break;
   case GL_COLOR_INDEXES:
      /* Color-index mode only exists in the compatibility profile. */
      if (ctx->API != API_OPENGL_COMPAT) {
         _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
         return;
      }
      if (updateMats & MAT_BIT_FRONT_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
      if (updateMats & MAT_BIT_BACK_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
      break;
   case GL_AMBIENT_AND_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   default:
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
      return;
   }
}
637
638
639 /**
640 * Flush (draw) vertices.
641 *
642 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
643 */
644 static void
645 vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, unsigned flags)
646 {
647 struct gl_context *ctx = exec->ctx;
648
649 if (flags & FLUSH_STORED_VERTICES) {
650 if (exec->vtx.vert_count) {
651 vbo_exec_vtx_flush(exec);
652 }
653
654 if (exec->vtx.vertex_size) {
655 vbo_exec_copy_to_current(exec);
656 vbo_reset_all_attr(exec);
657 }
658
659 /* All done. */
660 ctx->Driver.NeedFlush = 0;
661 } else {
662 assert(flags == FLUSH_UPDATE_CURRENT);
663
664 /* Note that the vertex size is unchanged.
665 * (vbo_reset_all_attr isn't called)
666 */
667 vbo_exec_copy_to_current(exec);
668
669 /* Only FLUSH_UPDATE_CURRENT is done. */
670 ctx->Driver.NeedFlush = ~FLUSH_UPDATE_CURRENT;
671 }
672 }
673
674
/**
 * Called via glEvalCoord1f.  Evaluates the enabled 1D maps at parameter u
 * and emits a vertex.
 */
static void GLAPIENTRY
vbo_exec_EvalCoord1f(GLfloat u)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure each attribute written by a 1D map has the map's size. */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map1[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map1[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
      }
   }

   /* Save the current vertex attribs (copied.buffer doubles as scratch
    * here) so the evaluation doesn't clobber them...
    */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord1f(exec, u);

   /* ...and restore them afterwards. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
701
702
/**
 * Called via glEvalCoord2f.  Evaluates the enabled 2D maps at (u, v)
 * and emits a vertex.
 */
static void GLAPIENTRY
vbo_exec_EvalCoord2f(GLfloat u, GLfloat v)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure each attribute written by a 2D map has the map's size. */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map2[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map2[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
      }

      /* GL_AUTO_NORMAL generates a 3-component normal. */
      if (ctx->Eval.AutoNormal)
         if (exec->vtx.attr[VBO_ATTRIB_NORMAL].active_size != 3)
            vbo_exec_fixup_vertex(ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT);
   }

   /* Save the current vertex attribs (copied.buffer doubles as scratch
    * here) so the evaluation doesn't clobber them...
    */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord2f(exec, u, v);

   /* ...and restore them afterwards. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
733
734
/** Called via glEvalCoord1fv: vector form of glEvalCoord1f. */
static void GLAPIENTRY
vbo_exec_EvalCoord1fv(const GLfloat *u)
{
   vbo_exec_EvalCoord1f(u[0]);
}
740
741
/** Called via glEvalCoord2fv: vector form of glEvalCoord2f. */
static void GLAPIENTRY
vbo_exec_EvalCoord2fv(const GLfloat *u)
{
   vbo_exec_EvalCoord2f(u[0], u[1]);
}
747
748
749 static void GLAPIENTRY
750 vbo_exec_EvalPoint1(GLint i)
751 {
752 GET_CURRENT_CONTEXT(ctx);
753 GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
754 (GLfloat) ctx->Eval.MapGrid1un);
755 GLfloat u = i * du + ctx->Eval.MapGrid1u1;
756
757 vbo_exec_EvalCoord1f(u);
758 }
759
760
761 static void GLAPIENTRY
762 vbo_exec_EvalPoint2(GLint i, GLint j)
763 {
764 GET_CURRENT_CONTEXT(ctx);
765 GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
766 (GLfloat) ctx->Eval.MapGrid2un);
767 GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
768 (GLfloat) ctx->Eval.MapGrid2vn);
769 GLfloat u = i * du + ctx->Eval.MapGrid2u1;
770 GLfloat v = j * dv + ctx->Eval.MapGrid2v1;
771
772 vbo_exec_EvalCoord2f(u, v);
773 }
774
775
/**
 * Called via glBegin.
 * Validates the mode, opens a new _mesa_prim slot, and swaps in the
 * begin/end dispatch table.
 */
static void GLAPIENTRY
vbo_exec_Begin(GLenum mode)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_context *vbo = vbo_context(ctx);
   struct vbo_exec_context *exec = &vbo->exec;
   int i;

   /* Nested glBegin is an error. */
   if (_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
      return;
   }

   if (!_mesa_valid_prim_mode(ctx, mode, "glBegin")) {
      return;
   }

   if (!_mesa_valid_to_render(ctx, "glBegin")) {
      return;
   }

   /* Heuristic: attempt to isolate attributes occurring outside
    * begin/end pairs.
    *
    * Use FLUSH_STORED_VERTICES, because it updates current attribs and
    * sets vertex_size to 0. (FLUSH_UPDATE_CURRENT doesn't change vertex_size)
    */
   if (exec->vtx.vertex_size && !exec->vtx.attr[VBO_ATTRIB_POS].size)
      vbo_exec_FlushVertices_internal(exec, FLUSH_STORED_VERTICES);

   /* Open a new primitive record starting at the current vertex. */
   i = exec->vtx.prim_count++;
   exec->vtx.prim[i].mode = mode;
   exec->vtx.prim[i].begin = 1;
   exec->vtx.prim[i].end = 0;
   exec->vtx.prim[i].indexed = 0;
   exec->vtx.prim[i].start = exec->vtx.vert_count;
   exec->vtx.prim[i].count = 0;
   exec->vtx.prim[i].num_instances = 1;
   exec->vtx.prim[i].base_instance = 0;

   ctx->Driver.CurrentExecPrimitive = mode;

   /* Switch to the dispatch table that's valid between glBegin/glEnd. */
   ctx->Exec = ctx->BeginEnd;

   /* We may have been called from a display list, in which case we should
    * leave dlist.c's dispatch table in place.
    */
   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->OutsideBeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   } else {
      assert(ctx->CurrentClientDispatch == ctx->Save);
   }
}
835
836
/**
 * Try to merge / concatenate the two most recent VBO primitives.
 * Reduces the number of draw calls when consecutive begin/end pairs
 * produce compatible primitives.
 */
static void
try_vbo_merge(struct vbo_exec_context *exec)
{
   struct _mesa_prim *cur = &exec->vtx.prim[exec->vtx.prim_count - 1];

   assert(exec->vtx.prim_count >= 1);

   /* e.g. turn a 4-vertex GL_POLYGON into a QUAD-like prim if possible. */
   vbo_try_prim_conversion(cur);

   if (exec->vtx.prim_count >= 2) {
      struct _mesa_prim *prev = &exec->vtx.prim[exec->vtx.prim_count - 2];
      assert(prev == cur - 1);

      if (vbo_can_merge_prims(prev, cur)) {
         /* Both prims must be complete begin/end pairs to merge. */
         assert(cur->begin);
         assert(cur->end);
         assert(prev->begin);
         assert(prev->end);
         vbo_merge_prims(prev, cur);
         exec->vtx.prim_count--; /* drop the last primitive */
      }
   }
}
863
864
/**
 * Called via glEnd.
 * Closes the current primitive, handles the GL_LINE_LOOP tail case, and
 * restores the outside-begin/end dispatch table.
 */
static void GLAPIENTRY
vbo_exec_End(void)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   if (!_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
      return;
   }

   /* Swap back to the dispatch table valid outside begin/end. */
   ctx->Exec = ctx->OutsideBeginEnd;

   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->BeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   }

   if (exec->vtx.prim_count > 0) {
      /* close off current primitive */
      struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
      unsigned count = exec->vtx.vert_count - last_prim->start;

      last_prim->end = 1;
      last_prim->count = count;

      if (count)
         ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;

      /* Special handling for GL_LINE_LOOP */
      if (last_prim->mode == GL_LINE_LOOP && last_prim->begin == 0) {
         /* We're finishing drawing a line loop.  Append 0th vertex onto
          * end of vertex buffer so we can draw it as a line strip.
          */
         const fi_type *src = exec->vtx.buffer_map +
            last_prim->start * exec->vtx.vertex_size;
         fi_type *dst = exec->vtx.buffer_map +
            exec->vtx.vert_count * exec->vtx.vertex_size;

         /* copy 0th vertex to end of buffer */
         memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));

         last_prim->start++;  /* skip vertex0 */
         /* note that last_prim->count stays unchanged */
         last_prim->mode = GL_LINE_STRIP;

         /* Increment the vertex count so the next primitive doesn't
          * overwrite the last vertex which we just added.
          */
         exec->vtx.vert_count++;
         exec->vtx.buffer_ptr += exec->vtx.vertex_size;
      }

      try_vbo_merge(exec);
   }

   ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;

   /* Flush now if the prim array is full. */
   if (exec->vtx.prim_count == VBO_MAX_PRIM)
      vbo_exec_vtx_flush(exec);

   if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
      _mesa_flush(ctx);
   }
}
935
936
937 /**
938 * Called via glPrimitiveRestartNV()
939 */
940 static void GLAPIENTRY
941 vbo_exec_PrimitiveRestartNV(void)
942 {
943 GLenum curPrim;
944 GET_CURRENT_CONTEXT(ctx);
945
946 curPrim = ctx->Driver.CurrentExecPrimitive;
947
948 if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
949 _mesa_error(ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV");
950 }
951 else {
952 vbo_exec_End();
953 vbo_exec_Begin(curPrim);
954 }
955 }
956
957
/**
 * Populate exec->vtxfmt with the vbo_exec_* entry points.
 * The actual assignments live in the vbo_init_tmp.h template, which is
 * parameterized by the NAME* macros below.
 */
static void
vbo_exec_vtxfmt_init(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   GLvertexformat *vfmt = &exec->vtxfmt;

#define NAME_AE(x) _ae_##x
#define NAME_CALLLIST(x) _mesa_##x
#define NAME(x) vbo_exec_##x
#define NAME_ES(x) _es_##x

#include "vbo_init_tmp.h"
}
971
972
973 static void
974 vbo_reset_all_attr(struct vbo_exec_context *exec)
975 {
976 while (exec->vtx.enabled) {
977 const int i = u_bit_scan64(&exec->vtx.enabled);
978
979 /* Reset the vertex attribute by setting its size to zero. */
980 exec->vtx.attr[i].size = 0;
981 exec->vtx.attr[i].type = GL_FLOAT;
982 exec->vtx.attr[i].active_size = 0;
983 exec->vtx.attrptr[i] = NULL;
984 }
985
986 exec->vtx.vertex_size = 0;
987 }
988
989
/**
 * One-time setup of the immediate-mode vertex buffer: allocate backing
 * storage (a GL buffer object or malloc'd memory), install the vtxfmt
 * dispatch entries and reset all attributes.
 */
void
vbo_exec_vtx_init(struct vbo_exec_context *exec, bool use_buffer_objects)
{
   struct gl_context *ctx = exec->ctx;

   if (use_buffer_objects) {
      /* Use buffer objects for immediate mode. */
      struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

      exec->vtx.bufferobj = ctx->Driver.NewBufferObject(ctx, IMM_BUFFER_NAME);

      /* Map the buffer. */
      vbo_exec_vtx_map(exec);
      assert(exec->vtx.buffer_ptr);
   } else {
      /* Use allocated memory for immediate mode. */
      _mesa_reference_buffer_object(ctx,
                                    &exec->vtx.bufferobj,
                                    ctx->Shared->NullBufferObj);

      exec->vtx.buffer_map = _mesa_align_malloc(VBO_VERT_BUFFER_SIZE, 64);
      exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   }

   vbo_exec_vtxfmt_init(exec);
   _mesa_noop_vtxfmt_init(ctx, &exec->vtxfmt_noop);

   /* Mark everything enabled so the reset below clears every slot. */
   exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
   vbo_reset_all_attr(exec);
}
1020
1021
/**
 * Tear down the immediate-mode vertex buffer: free malloc'd storage,
 * unmap a still-mapped buffer object and drop the reference.
 */
void
vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
{
   /* using a real VBO for vertex data */
   struct gl_context *ctx = exec->ctx;

   /* True VBOs should already be unmapped
    */
   if (exec->vtx.buffer_map) {
      assert(exec->vtx.bufferobj->Name == 0 ||
             exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
      if (exec->vtx.bufferobj->Name == 0) {
         /* Name==0 means the malloc path from vbo_exec_vtx_init. */
         _mesa_align_free(exec->vtx.buffer_map);
         exec->vtx.buffer_map = NULL;
         exec->vtx.buffer_ptr = NULL;
      }
   }

   /* Free the vertex buffer.  Unmap first if needed.
    */
   if (_mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
      ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
   }
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
}
1047
1048
/**
 * If inside glBegin()/glEnd(), it should assert(0).  Otherwise, if
 * FLUSH_STORED_VERTICES bit in \p flags is set flushes any buffered
 * vertices, if FLUSH_UPDATE_CURRENT bit is set updates
 * __struct gl_contextRec::Current and gl_light_attrib::Material
 *
 * Note that the default T&L engine never clears the
 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
 *
 * \param flags  bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
 */
void
vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

#ifndef NDEBUG
   /* debug check: make sure we don't get called recursively */
   exec->flush_call_depth++;
   assert(exec->flush_call_depth == 1);
#endif

   if (_mesa_inside_begin_end(ctx)) {
      /* We've had glBegin but not glEnd! */
#ifndef NDEBUG
      exec->flush_call_depth--;
      assert(exec->flush_call_depth == 0);
#endif
      return;
   }

   /* Flush (draw). */
   vbo_exec_FlushVertices_internal(exec, flags);

#ifndef NDEBUG
   exec->flush_call_depth--;
   assert(exec->flush_call_depth == 0);
#endif
}
1088
1089
/** GLES1 entry point: forward to the vbo color handler. */
void GLAPIENTRY
_es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
{
   vbo_exec_Color4f(r, g, b, a);
}
1095
1096
/** GLES1 entry point: forward to the vbo normal handler. */
void GLAPIENTRY
_es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
{
   vbo_exec_Normal3f(x, y, z);
}
1102
1103
/** GLES1 entry point: forward to the vbo multitexcoord handler. */
void GLAPIENTRY
_es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
{
   vbo_exec_MultiTexCoord4f(target, s, t, r, q);
}
1109
1110
/** GLES1 entry point: forward to the vbo material handler. */
void GLAPIENTRY
_es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   vbo_exec_Materialfv(face, pname, params);
}
1116
1117
1118 void GLAPIENTRY
1119 _es_Materialf(GLenum face, GLenum pname, GLfloat param)
1120 {
1121 GLfloat p[4];
1122 p[0] = param;
1123 p[1] = p[2] = p[3] = 0.0F;
1124 vbo_exec_Materialfv(face, pname, p);
1125 }
1126
1127
/**
 * A special version of glVertexAttrib4f that does not treat index 0 as
 * VBO_ATTRIB_POS.
 * Generic attribute 0 therefore never aliases glVertex here.
 */
static void
VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
{
   GET_CURRENT_CONTEXT(ctx);
   if (index < MAX_VERTEX_GENERIC_ATTRIBS)
      ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
   else
      ERROR(GL_INVALID_VALUE);
}
1141
/** GLES entry point: 4-component generic vertex attribute. */
void GLAPIENTRY
_es_VertexAttrib4f(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
{
   VertexAttrib4f_nopos(index, x, y, z, w);
}
1147
1148
/** GLES entry point: 1-component attribute, defaults y=z=0, w=1. */
void GLAPIENTRY
_es_VertexAttrib1f(GLuint indx, GLfloat x)
{
   VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
}
1154
1155
/** GLES entry point: vector form of _es_VertexAttrib1f. */
void GLAPIENTRY
_es_VertexAttrib1fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
}
1161
1162
/** GLES entry point: 2-component attribute, defaults z=0, w=1. */
void GLAPIENTRY
_es_VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y)
{
   VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
}
1168
1169
/** GLES entry point: vector form of _es_VertexAttrib2f. */
void GLAPIENTRY
_es_VertexAttrib2fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
}
1175
1176
/** GLES entry point: 3-component attribute, defaults w=1. */
void GLAPIENTRY
_es_VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
{
   VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
}
1182
1183
/** GLES entry point: vector form of _es_VertexAttrib3f. */
void GLAPIENTRY
_es_VertexAttrib3fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
}
1189
1190
/** GLES entry point: vector form of _es_VertexAttrib4f. */
void GLAPIENTRY
_es_VertexAttrib4fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
}