vbo: clean up conditional blocks in ATTR_UNION
[mesa.git] / src / mesa / vbo / vbo_exec_api.c
1 /**************************************************************************
2
3 Copyright 2002-2008 VMware, Inc.
4
5 All Rights Reserved.
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 on the rights to use, copy, modify, merge, publish, distribute, sub
11 license, and/or sell copies of the Software, and to permit persons to whom
12 the Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice (including the next
15 paragraph) shall be included in all copies or substantial portions of the
16 Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **************************************************************************/
27
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33 #include "main/glheader.h"
34 #include "main/bufferobj.h"
35 #include "main/context.h"
36 #include "main/macros.h"
37 #include "main/vtxfmt.h"
38 #include "main/dlist.h"
39 #include "main/eval.h"
40 #include "main/state.h"
41 #include "main/light.h"
42 #include "main/api_arrayelt.h"
43 #include "main/draw_validate.h"
44 #include "main/dispatch.h"
45 #include "util/bitscan.h"
46
47 #include "vbo_noop.h"
48 #include "vbo_private.h"
49
50
51 /** ID/name for immediate-mode VBO */
52 #define IMM_BUFFER_NAME 0xaabbccdd
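/* An arbitrary, otherwise-unused name: vbo_exec_vtx_init()/vbo_exec_vtx_destroy()
 * below use it (versus Name == 0) to tell the driver-allocated immediate-mode
 * VBO apart from the malloc'ed fallback buffer. */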
53
54
55 static void
56 vbo_reset_all_attr(struct vbo_exec_context *exec);
57
58
59 /**
60 * Close off the last primitive, execute the buffer, restart the
61 * primitive. This is called when we fill a vertex buffer before
62 * hitting glEnd.
63 */
64 static void
65 vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
66 {
67 if (exec->vtx.prim_count == 0) {
68 exec->vtx.copied.nr = 0;
69 exec->vtx.vert_count = 0;
70 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
71 }
72 else {
73 struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
74 const GLuint last_begin = last_prim->begin;
75 GLuint last_count;
76
77 if (_mesa_inside_begin_end(exec->ctx)) {
78 last_prim->count = exec->vtx.vert_count - last_prim->start;
79 }
80
81 last_count = last_prim->count;
82
83 /* Special handling for wrapping GL_LINE_LOOP */
84 if (last_prim->mode == GL_LINE_LOOP &&
85 last_count > 0 &&
86 !last_prim->end) {
87 /* draw this section of the incomplete line loop as a line strip */
88 last_prim->mode = GL_LINE_STRIP;
89 if (!last_prim->begin) {
90 /* This is not the first section of the line loop, so don't
91 * draw the 0th vertex. We're saving it until we draw the
92 * very last section of the loop.
93 */
94 last_prim->start++;
95 last_prim->count--;
96 }
97 }
98
99 /* Execute the buffer and save copied vertices.
100 */
101 if (exec->vtx.vert_count)
102 vbo_exec_vtx_flush(exec);
103 else {
104 exec->vtx.prim_count = 0;
105 exec->vtx.copied.nr = 0;
106 }
107
108 /* Emit a glBegin to start the new list.
109 */
110 assert(exec->vtx.prim_count == 0);
111
112 if (_mesa_inside_begin_end(exec->ctx)) {
113 exec->vtx.prim[0].mode = exec->ctx->Driver.CurrentExecPrimitive;
114 exec->vtx.prim[0].begin = 0;
115 exec->vtx.prim[0].end = 0;
116 exec->vtx.prim[0].start = 0;
117 exec->vtx.prim[0].count = 0;
118 exec->vtx.prim_count++;
119
120 if (exec->vtx.copied.nr == last_count)
121 exec->vtx.prim[0].begin = last_begin;
122 }
123 }
124 }
125
126
127 /**
128 * Deal with buffer wrapping when provoked by the vertex buffer
129 * filling up, as opposed to upgrade_vertex().
130 */
131 static void
132 vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
133 {
134 unsigned numComponents;
135
136 /* Run pipeline on current vertices, copy wrapped vertices
137 * to exec->vtx.copied.
138 */
139 vbo_exec_wrap_buffers(exec);
140
141 if (!exec->vtx.buffer_ptr) {
142 /* probably ran out of memory earlier when allocating the VBO */
143 return;
144 }
145
146 /* Copy the stored vertices to the start of the new list.
147 */
148 assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);
149
150 numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
151 memcpy(exec->vtx.buffer_ptr,
152 exec->vtx.copied.buffer,
153 numComponents * sizeof(fi_type));
154 exec->vtx.buffer_ptr += numComponents;
155 exec->vtx.vert_count += exec->vtx.copied.nr;
156
157 exec->vtx.copied.nr = 0;
158 }
159
160
161 /**
162 * Copy the active vertex's values to the ctx->Current fields.
163 */
164 static void
165 vbo_exec_copy_to_current(struct vbo_exec_context *exec)
166 {
167 struct gl_context *ctx = exec->ctx;
168 struct vbo_context *vbo = vbo_context(ctx);
169 GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));
170
171 while (enabled) {
172 const int i = u_bit_scan64(&enabled);
173
174 /* Note: the vbo->current[i].Ptr pointers point into the
175 * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
176 */
177 GLfloat *current = (GLfloat *)vbo->current[i].Ptr;
178 fi_type tmp[8]; /* space for doubles */
179 int dmul = 1;
180
181 if (exec->vtx.attr[i].type == GL_DOUBLE ||
182 exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB)
183 dmul = 2;
184
185 assert(exec->vtx.attr[i].size);
186
187 if (exec->vtx.attr[i].type == GL_DOUBLE ||
188 exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB) {
189 memset(tmp, 0, sizeof(tmp));
190 memcpy(tmp, exec->vtx.attrptr[i], exec->vtx.attr[i].size * sizeof(GLfloat));
191 } else {
192 COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
193 exec->vtx.attr[i].size,
194 exec->vtx.attrptr[i],
195 exec->vtx.attr[i].type);
196 }
197
198 if (exec->vtx.attr[i].type != vbo->current[i].Format.Type ||
199 memcmp(current, tmp, 4 * sizeof(GLfloat) * dmul) != 0) {
200 memcpy(current, tmp, 4 * sizeof(GLfloat) * dmul);
201
202 /* Given that we explicitly state the size here, there is no need
203 * for the COPY_CLEAN above; we could just copy 16 bytes and be
204 * done. The only problem is when Mesa accesses ctx->Current
205 * directly.
206 */
207 /* Size here is in components - not bytes */
208 vbo_set_vertex_format(&vbo->current[i].Format,
209 exec->vtx.attr[i].size / dmul,
210 exec->vtx.attr[i].type);
211
212 /* This triggers rather too much recalculation of Mesa state
213 * that doesn't get used (eg light positions).
214 */
215 if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT &&
216 i <= VBO_ATTRIB_MAT_BACK_INDEXES)
217 ctx->NewState |= _NEW_LIGHT;
218
219 ctx->NewState |= _NEW_CURRENT_ATTRIB;
220 }
221 }
222
223 /* Colormaterial -- this kind of sucks.
224 */
225 if (ctx->Light.ColorMaterialEnabled &&
226 exec->vtx.attr[VBO_ATTRIB_COLOR0].size) {
227 _mesa_update_color_material(ctx,
228 ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
229 }
230 }
231
232
233 /**
234 * Flush existing data, set new attrib size, replay copied vertices.
235 * This is called when we transition from a small vertex attribute size
236 * to a larger one. Ex: glTexCoord2f -> glTexCoord4f.
237 * We need to go back over the previous 2-component texcoords and insert
238 * zero and one values.
239 * \param attr VBO_ATTRIB_x vertex attribute value
240 */
241 static void
242 vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
243 GLuint attr, GLuint newSize)
244 {
245 struct gl_context *ctx = exec->ctx;
246 struct vbo_context *vbo = vbo_context(ctx);
247 const GLint lastcount = exec->vtx.vert_count;
248 fi_type *old_attrptr[VBO_ATTRIB_MAX];
249 const GLuint old_vtx_size_no_pos = exec->vtx.vertex_size_no_pos;
250 const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
251 const GLuint oldSize = exec->vtx.attr[attr].size;
252 GLuint i;
253
254 assert(attr < VBO_ATTRIB_MAX);
255
256 /* Run pipeline on current vertices, copy wrapped vertices
257 * to exec->vtx.copied.
258 */
259 vbo_exec_wrap_buffers(exec);
260
261 if (unlikely(exec->vtx.copied.nr)) {
262 /* We're in the middle of a primitive, keep the old vertex
263 * format around to be able to translate the copied vertices to
264 * the new format.
265 */
266 memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
267 }
268
269 /* Heuristic: Attempt to isolate attributes received outside
270 * begin/end so that they don't bloat the vertices.
271 */
272 if (!_mesa_inside_begin_end(ctx) &&
273 !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
274 vbo_exec_copy_to_current(exec);
275 vbo_reset_all_attr(exec);
276 }
277
278 /* Fix up sizes:
279 */
280 exec->vtx.attr[attr].size = newSize;
281 exec->vtx.vertex_size += newSize - oldSize;
282 exec->vtx.vertex_size_no_pos = exec->vtx.vertex_size - exec->vtx.attr[0].size;
283 exec->vtx.max_vert = vbo_compute_max_verts(exec);
284 exec->vtx.vert_count = 0;
285 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
286 exec->vtx.enabled |= BITFIELD64_BIT(attr);
287
288 if (attr != 0) {
289 if (unlikely(oldSize)) {
290 unsigned offset = exec->vtx.attrptr[attr] - exec->vtx.vertex;
291
292 /* If there are attribs after the resized attrib... */
293 if (offset + oldSize < old_vtx_size_no_pos) {
294 int size_diff = newSize - oldSize;
295 fi_type *old_first = exec->vtx.attrptr[attr] + oldSize;
296 fi_type *new_first = exec->vtx.attrptr[attr] + newSize;
297 fi_type *old_last = exec->vtx.vertex + old_vtx_size_no_pos - 1;
298 fi_type *new_last = exec->vtx.vertex + exec->vtx.vertex_size_no_pos - 1;
299
300 if (size_diff < 0) {
301 /* Decreasing the size: Copy from first to last to move
302 * elements to the left.
303 */
304 fi_type *old_end = old_last + 1;
305 fi_type *old = old_first;
306 fi_type *new = new_first;
307
308 do {
309 *new++ = *old++;
310 } while (old != old_end);
311 } else {
312 /* Increasing the size: Copy from last to first to move
313 * elements to the right.
314 */
315 fi_type *old_end = old_first - 1;
316 fi_type *old = old_last;
317 fi_type *new = new_last;
318
319 do {
320 *new-- = *old--;
321 } while (old != old_end);
322 }
323
324 /* Update pointers to attribs, because we moved them. */
325 GLbitfield64 enabled = exec->vtx.enabled &
326 ~BITFIELD64_BIT(VBO_ATTRIB_POS) &
327 ~BITFIELD64_BIT(attr);
328 while (enabled) {
329 unsigned i = u_bit_scan64(&enabled);
330
331 if (exec->vtx.attrptr[i] > exec->vtx.attrptr[attr])
332 exec->vtx.attrptr[i] += size_diff;
333 }
334 }
335 } else {
336 /* Just have to append the new attribute at the end */
337 exec->vtx.attrptr[attr] = exec->vtx.vertex +
338 exec->vtx.vertex_size_no_pos - newSize;
339 }
340 }
341
342 /* The position is always last. */
343 exec->vtx.attrptr[0] = exec->vtx.vertex + exec->vtx.vertex_size_no_pos;
344
345 /* Replay stored vertices to translate them
346 * to new format here.
347 *
348 * -- No need to replay - just copy piecewise
349 */
350 if (unlikely(exec->vtx.copied.nr)) {
351 fi_type *data = exec->vtx.copied.buffer;
352 fi_type *dest = exec->vtx.buffer_ptr;
353
354 assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);
355
356 for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
357 GLbitfield64 enabled = exec->vtx.enabled;
358 while (enabled) {
359 const int j = u_bit_scan64(&enabled);
360 GLuint sz = exec->vtx.attr[j].size;
361 GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
362 GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;
363
364 assert(sz);
365
366 if (j == attr) {
367 if (oldSize) {
368 fi_type tmp[4];
369 COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
370 data + old_offset,
371 exec->vtx.attr[j].type);
372 COPY_SZ_4V(dest + new_offset, newSize, tmp);
373 } else {
374 fi_type *current = (fi_type *)vbo->current[j].Ptr;
375 COPY_SZ_4V(dest + new_offset, sz, current);
376 }
377 }
378 else {
379 COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
380 }
381 }
382
383 data += old_vtx_size;
384 dest += exec->vtx.vertex_size;
385 }
386
387 exec->vtx.buffer_ptr = dest;
388 exec->vtx.vert_count += exec->vtx.copied.nr;
389 exec->vtx.copied.nr = 0;
390 }
391 }
392
393
394 /**
395 * This is when a vertex attribute transitions to a different size.
396 * For example, we saw a bunch of glTexCoord2f() calls and now we get a
397 * glTexCoord4f() call. We promote the array from size=2 to size=4.
398 * \param newSize size of the new attribute (number of 32-bit words).
399 * \param attr VBO_ATTRIB_x vertex attribute value
400 */
401 static void
402 vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
403 GLuint newSize, GLenum newType)
404 {
405 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
406
407 assert(attr < VBO_ATTRIB_MAX);
408
409 if (newSize > exec->vtx.attr[attr].size ||
410 newType != exec->vtx.attr[attr].type) {
411 /* The new size is larger or the type has changed. Need to flush
412 * existing vertices and get an enlarged vertex format.
413 */
414 vbo_exec_wrap_upgrade_vertex(exec, attr, newSize);
415 }
416 else if (newSize < exec->vtx.attr[attr].active_size) {
417 GLuint i;
418 const fi_type *id =
419 vbo_get_default_vals_as_union(exec->vtx.attr[attr].type);
420
421 /* New size is smaller - just fill the now-unused trailing
422 * components with their default values. Don't need to flush or wrap.
423 */
424 for (i = newSize; i <= exec->vtx.attr[attr].size; i++)
425 exec->vtx.attrptr[attr][i-1] = id[i-1];
426 }
427
428 exec->vtx.attr[attr].active_size = newSize;
429 exec->vtx.attr[attr].type = newType;
430 }
431
432
433 /**
434 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
435 * It depends on a few things, including whether we're inside or outside
436 * of glBegin/glEnd.
437 */
438 static inline bool
439 is_vertex_position(const struct gl_context *ctx, GLuint index)
440 {
441 return (index == 0 &&
442 _mesa_attr_zero_aliases_vertex(ctx) &&
443 _mesa_inside_begin_end(ctx));
444 }
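/* For example (roughly): in a compatibility profile, glVertexAttrib3f(0, x, y, z)
 * issued between glBegin/glEnd is routed to VBO_ATTRIB_POS and completes a
 * vertex just like glVertex3f(x, y, z); outside begin/end it only updates
 * generic attribute 0. */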
445
446 /* Write a 64-bit value through a 32-bit pointer as two words, preserving endianness. */
447 #if UTIL_ARCH_LITTLE_ENDIAN
448 #define SET_64BIT(dst32, u64) do { \
449 *(dst32)++ = (u64); \
450 *(dst32)++ = (uint64_t)(u64) >> 32; \
451 } while (0)
452 #else
453 #define SET_64BIT(dst32, u64) do { \
454 *(dst32)++ = (uint64_t)(u64) >> 32; \
455 *(dst32)++ = (u64); \
456 } while (0)
457 #endif
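/* Illustrative note: for a 64-bit component (e.g. one channel of a GLdouble
 * position), SET_64BIT stores the low and high 32-bit halves in whichever
 * order matches the platform's native 64-bit layout, so the buffer contents
 * can later be reinterpreted as 64-bit data without swapping. */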
458
459
460 /**
461 * This macro is used to implement all the glVertex, glColor, glTexCoord,
462 * glVertexAttrib, etc. functions.
463 * \param A VBO_ATTRIB_x attribute index
464 * \param N attribute size (1..4)
465 * \param T type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
466 * \param C cast type (uint32_t or uint64_t)
467 * \param V0, V1, V2, V3 attribute value
468 */
469 #define ATTR_UNION(A, N, T, C, V0, V1, V2, V3) \
470 do { \
471 struct vbo_exec_context *exec = &vbo_context(ctx)->exec; \
472 int sz = (sizeof(C) / sizeof(GLfloat)); \
473 \
474 assert(sz == 1 || sz == 2); \
475 \
476 /* check if attribute size or type is changing */ \
477 if (unlikely(exec->vtx.attr[A].active_size != N * sz || \
478 exec->vtx.attr[A].type != T)) { \
479 vbo_exec_fixup_vertex(ctx, A, N * sz, T); \
480 } \
481 \
482 /* store a copy of the attribute in exec except for glVertex */ \
483 if ((A) != 0) { \
484 C *dest = (C *)exec->vtx.attrptr[A]; \
485 if (N>0) dest[0] = V0; \
486 if (N>1) dest[1] = V1; \
487 if (N>2) dest[2] = V2; \
488 if (N>3) dest[3] = V3; \
489 assert(exec->vtx.attr[A].type == T); \
490 \
491 /* we now have accumulated a per-vertex attribute */ \
492 ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT; \
493 } else { \
494 /* This is a glVertex call */ \
495 uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr; \
496 uint32_t *src = (uint32_t *)exec->vtx.vertex; \
497 unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos; \
498 \
499 /* Copy over attributes from exec. */ \
500 for (unsigned i = 0; i < vertex_size_no_pos; i++) \
501 *dst++ = *src++; \
502 \
503 /* Store the position, which is always last and can have 32 or */ \
504 /* 64 bits per channel. */ \
505 if (sizeof(C) == 4) { \
506 if (N > 0) *dst++ = V0; \
507 if (N > 1) *dst++ = V1; \
508 if (N > 2) *dst++ = V2; \
509 if (N > 3) *dst++ = V3; \
510 } else { \
511 /* 64 bits: dst can be unaligned, so copy each 4-byte word */ \
512 /* separately */ \
513 if (N > 0) SET_64BIT(dst, V0); \
514 if (N > 1) SET_64BIT(dst, V1); \
515 if (N > 2) SET_64BIT(dst, V2); \
516 if (N > 3) SET_64BIT(dst, V3); \
517 } \
518 \
519 /* dst now points at the beginning of the next vertex */ \
520 exec->vtx.buffer_ptr = (fi_type*)dst; \
521 \
522 /* Don't set FLUSH_UPDATE_CURRENT because */ \
523 /* Current.Attrib[VBO_ATTRIB_POS] is never used. */ \
524 \
525 if (unlikely(++exec->vtx.vert_count >= exec->vtx.max_vert)) \
526 vbo_exec_vtx_wrap(exec); \
527 } \
528 } while (0)
529
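/* A rough sketch of the vertex layout ATTR_UNION relies on (as set up in
 * vbo_exec_wrap_upgrade_vertex() above): the enabled non-position attributes
 * are packed at the start of exec->vtx.vertex, each addressed by
 * exec->vtx.attrptr[i], and the position (attr 0) comes last at
 * vertex + vertex_size_no_pos. A generated call such as vbo_exec_Color3f()
 * therefore only rewrites the color slot in exec->vtx.vertex, while a
 * glVertex-class call (A == 0) copies the accumulated attributes plus the
 * new position into buffer_ptr, finishing one vertex. */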
530
531 #undef ERROR
532 #define ERROR(err) _mesa_error(ctx, err, __func__)
533 #define TAG(x) vbo_exec_##x
534
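/* Including vbo_attrib_tmp.h expands the ATTR_UNION/ERROR/TAG macros above
 * into the immediate-mode entrypoints used in the rest of this file
 * (e.g. vbo_exec_Color4f, vbo_exec_Normal3f, vbo_exec_MultiTexCoord4f). */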
535 #include "vbo_attrib_tmp.h"
536
537
538
539 /**
540 * Execute a glMaterial call. Note that if GL_COLOR_MATERIAL is enabled,
541 * this may be a (partial) no-op.
542 */
543 static void GLAPIENTRY
544 vbo_exec_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
545 {
546 GLbitfield updateMats;
547 GET_CURRENT_CONTEXT(ctx);
548
549 /* This function should be a no-op when it tries to update material
550 * attributes which are currently tracking glColor via glColorMaterial.
551 * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
552 * indicating which material attributes can actually be updated below.
553 */
554 if (ctx->Light.ColorMaterialEnabled) {
555 updateMats = ~ctx->Light._ColorMaterialBitmask;
556 }
557 else {
558 /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
559 updateMats = ALL_MATERIAL_BITS;
560 }
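/* For example, with the default glColorMaterial(GL_FRONT_AND_BACK,
 * GL_AMBIENT_AND_DIFFUSE) tracking enabled, a glMaterialfv(GL_FRONT,
 * GL_DIFFUSE, ...) call is masked out here and becomes a no-op. */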
561
562 if (ctx->API == API_OPENGL_COMPAT && face == GL_FRONT) {
563 updateMats &= FRONT_MATERIAL_BITS;
564 }
565 else if (ctx->API == API_OPENGL_COMPAT && face == GL_BACK) {
566 updateMats &= BACK_MATERIAL_BITS;
567 }
568 else if (face != GL_FRONT_AND_BACK) {
569 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
570 return;
571 }
572
573 switch (pname) {
574 case GL_EMISSION:
575 if (updateMats & MAT_BIT_FRONT_EMISSION)
576 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
577 if (updateMats & MAT_BIT_BACK_EMISSION)
578 MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
579 break;
580 case GL_AMBIENT:
581 if (updateMats & MAT_BIT_FRONT_AMBIENT)
582 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
583 if (updateMats & MAT_BIT_BACK_AMBIENT)
584 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
585 break;
586 case GL_DIFFUSE:
587 if (updateMats & MAT_BIT_FRONT_DIFFUSE)
588 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
589 if (updateMats & MAT_BIT_BACK_DIFFUSE)
590 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
591 break;
592 case GL_SPECULAR:
593 if (updateMats & MAT_BIT_FRONT_SPECULAR)
594 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
595 if (updateMats & MAT_BIT_BACK_SPECULAR)
596 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
597 break;
598 case GL_SHININESS:
599 if (*params < 0 || *params > ctx->Const.MaxShininess) {
600 _mesa_error(ctx, GL_INVALID_VALUE,
601 "glMaterial(invalid shininess: %f out of range [0, %f])",
602 *params, ctx->Const.MaxShininess);
603 return;
604 }
605 if (updateMats & MAT_BIT_FRONT_SHININESS)
606 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
607 if (updateMats & MAT_BIT_BACK_SHININESS)
608 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
609 break;
610 case GL_COLOR_INDEXES:
611 if (ctx->API != API_OPENGL_COMPAT) {
612 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
613 return;
614 }
615 if (updateMats & MAT_BIT_FRONT_INDEXES)
616 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
617 if (updateMats & MAT_BIT_BACK_INDEXES)
618 MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
619 break;
620 case GL_AMBIENT_AND_DIFFUSE:
621 if (updateMats & MAT_BIT_FRONT_AMBIENT)
622 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
623 if (updateMats & MAT_BIT_FRONT_DIFFUSE)
624 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
625 if (updateMats & MAT_BIT_BACK_AMBIENT)
626 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
627 if (updateMats & MAT_BIT_BACK_DIFFUSE)
628 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
629 break;
630 default:
631 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
632 return;
633 }
634 }
635
636
637 /**
638 * Flush (draw) vertices.
639 *
640 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
641 */
642 static void
643 vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, unsigned flags)
644 {
645 struct gl_context *ctx = exec->ctx;
646
647 if (flags & FLUSH_STORED_VERTICES) {
648 if (exec->vtx.vert_count) {
649 vbo_exec_vtx_flush(exec);
650 }
651
652 if (exec->vtx.vertex_size) {
653 vbo_exec_copy_to_current(exec);
654 vbo_reset_all_attr(exec);
655 }
656
657 /* All done. */
658 ctx->Driver.NeedFlush = 0;
659 } else {
660 assert(flags == FLUSH_UPDATE_CURRENT);
661
662 /* Note that the vertex size is unchanged.
663 * (vbo_reset_all_attr isn't called)
664 */
665 vbo_exec_copy_to_current(exec);
666
667 /* Only FLUSH_UPDATE_CURRENT is done. */
668 ctx->Driver.NeedFlush &= ~FLUSH_UPDATE_CURRENT;
669 }
670 }
671
672
673 static void GLAPIENTRY
674 vbo_exec_EvalCoord1f(GLfloat u)
675 {
676 GET_CURRENT_CONTEXT(ctx);
677 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
678
679 {
680 GLint i;
681 if (exec->eval.recalculate_maps)
682 vbo_exec_eval_update(exec);
683
684 for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
685 if (exec->eval.map1[i].map)
686 if (exec->vtx.attr[i].active_size != exec->eval.map1[i].sz)
687 vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
688 }
689 }
690
691 memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
692 exec->vtx.vertex_size * sizeof(GLfloat));
693
694 vbo_exec_do_EvalCoord1f(exec, u);
695
696 memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
697 exec->vtx.vertex_size * sizeof(GLfloat));
698 }
699
700
701 static void GLAPIENTRY
702 vbo_exec_EvalCoord2f(GLfloat u, GLfloat v)
703 {
704 GET_CURRENT_CONTEXT(ctx);
705 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
706
707 {
708 GLint i;
709 if (exec->eval.recalculate_maps)
710 vbo_exec_eval_update(exec);
711
712 for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
713 if (exec->eval.map2[i].map)
714 if (exec->vtx.attr[i].active_size != exec->eval.map2[i].sz)
715 vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
716 }
717
718 if (ctx->Eval.AutoNormal)
719 if (exec->vtx.attr[VBO_ATTRIB_NORMAL].active_size != 3)
720 vbo_exec_fixup_vertex(ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT);
721 }
722
723 memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
724 exec->vtx.vertex_size * sizeof(GLfloat));
725
726 vbo_exec_do_EvalCoord2f(exec, u, v);
727
728 memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
729 exec->vtx.vertex_size * sizeof(GLfloat));
730 }
731
732
733 static void GLAPIENTRY
734 vbo_exec_EvalCoord1fv(const GLfloat *u)
735 {
736 vbo_exec_EvalCoord1f(u[0]);
737 }
738
739
740 static void GLAPIENTRY
741 vbo_exec_EvalCoord2fv(const GLfloat *u)
742 {
743 vbo_exec_EvalCoord2f(u[0], u[1]);
744 }
745
746
747 static void GLAPIENTRY
748 vbo_exec_EvalPoint1(GLint i)
749 {
750 GET_CURRENT_CONTEXT(ctx);
751 GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
752 (GLfloat) ctx->Eval.MapGrid1un);
753 GLfloat u = i * du + ctx->Eval.MapGrid1u1;
754
755 vbo_exec_EvalCoord1f(u);
756 }
757
758
759 static void GLAPIENTRY
760 vbo_exec_EvalPoint2(GLint i, GLint j)
761 {
762 GET_CURRENT_CONTEXT(ctx);
763 GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
764 (GLfloat) ctx->Eval.MapGrid2un);
765 GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
766 (GLfloat) ctx->Eval.MapGrid2vn);
767 GLfloat u = i * du + ctx->Eval.MapGrid2u1;
768 GLfloat v = j * dv + ctx->Eval.MapGrid2v1;
769
770 vbo_exec_EvalCoord2f(u, v);
771 }
772
773
774 /**
775 * Called via glBegin.
776 */
777 static void GLAPIENTRY
778 vbo_exec_Begin(GLenum mode)
779 {
780 GET_CURRENT_CONTEXT(ctx);
781 struct vbo_context *vbo = vbo_context(ctx);
782 struct vbo_exec_context *exec = &vbo->exec;
783 int i;
784
785 if (_mesa_inside_begin_end(ctx)) {
786 _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
787 return;
788 }
789
790 if (!_mesa_valid_prim_mode(ctx, mode, "glBegin")) {
791 return;
792 }
793
794 if (!_mesa_valid_to_render(ctx, "glBegin")) {
795 return;
796 }
797
798 /* Heuristic: attempt to isolate attributes occurring outside
799 * begin/end pairs.
800 *
801 * Use FLUSH_STORED_VERTICES, because it updates current attribs and
802 * sets vertex_size to 0. (FLUSH_UPDATE_CURRENT doesn't change vertex_size)
803 */
804 if (exec->vtx.vertex_size && !exec->vtx.attr[VBO_ATTRIB_POS].size)
805 vbo_exec_FlushVertices_internal(exec, FLUSH_STORED_VERTICES);
806
807 i = exec->vtx.prim_count++;
808 exec->vtx.prim[i].mode = mode;
809 exec->vtx.prim[i].begin = 1;
810 exec->vtx.prim[i].end = 0;
811 exec->vtx.prim[i].start = exec->vtx.vert_count;
812 exec->vtx.prim[i].count = 0;
813
814 ctx->Driver.CurrentExecPrimitive = mode;
815
816 ctx->Exec = ctx->BeginEnd;
817
818 /* We may have been called from a display list, in which case we should
819 * leave dlist.c's dispatch table in place.
820 */
821 if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
822 ctx->CurrentServerDispatch = ctx->Exec;
823 } else if (ctx->CurrentClientDispatch == ctx->OutsideBeginEnd) {
824 ctx->CurrentClientDispatch = ctx->Exec;
825 _glapi_set_dispatch(ctx->CurrentClientDispatch);
826 } else {
827 assert(ctx->CurrentClientDispatch == ctx->Save);
828 }
829 }
830
831
832 /**
833 * Try to merge / concatenate the two most recent VBO primitives.
834 */
835 static void
836 try_vbo_merge(struct vbo_exec_context *exec)
837 {
838 struct _mesa_prim *cur = &exec->vtx.prim[exec->vtx.prim_count - 1];
839
840 assert(exec->vtx.prim_count >= 1);
841
842 vbo_try_prim_conversion(cur);
843
844 if (exec->vtx.prim_count >= 2) {
845 struct _mesa_prim *prev = &exec->vtx.prim[exec->vtx.prim_count - 2];
846 assert(prev == cur - 1);
847
848 if (vbo_can_merge_prims(prev, cur)) {
849 assert(cur->begin);
850 assert(cur->end);
851 assert(prev->begin);
852 assert(prev->end);
853 vbo_merge_prims(prev, cur);
854 exec->vtx.prim_count--; /* drop the last primitive */
855 }
856 }
857 }
858
859
860 /**
861 * Called via glEnd.
862 */
863 static void GLAPIENTRY
864 vbo_exec_End(void)
865 {
866 GET_CURRENT_CONTEXT(ctx);
867 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
868
869 if (!_mesa_inside_begin_end(ctx)) {
870 _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
871 return;
872 }
873
874 ctx->Exec = ctx->OutsideBeginEnd;
875
876 if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
877 ctx->CurrentServerDispatch = ctx->Exec;
878 } else if (ctx->CurrentClientDispatch == ctx->BeginEnd) {
879 ctx->CurrentClientDispatch = ctx->Exec;
880 _glapi_set_dispatch(ctx->CurrentClientDispatch);
881 }
882
883 if (exec->vtx.prim_count > 0) {
884 /* close off current primitive */
885 struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
886 unsigned count = exec->vtx.vert_count - last_prim->start;
887
888 last_prim->end = 1;
889 last_prim->count = count;
890
891 if (count)
892 ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
893
894 /* Special handling for GL_LINE_LOOP */
895 if (last_prim->mode == GL_LINE_LOOP && last_prim->begin == 0) {
896 /* We're finishing drawing a line loop. Append 0th vertex onto
897 * end of vertex buffer so we can draw it as a line strip.
898 */
899 const fi_type *src = exec->vtx.buffer_map +
900 last_prim->start * exec->vtx.vertex_size;
901 fi_type *dst = exec->vtx.buffer_map +
902 exec->vtx.vert_count * exec->vtx.vertex_size;
903
904 /* copy 0th vertex to end of buffer */
905 memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));
906
907 last_prim->start++; /* skip vertex0 */
908 /* note that last_prim->count stays unchanged */
909 last_prim->mode = GL_LINE_STRIP;
910
911 /* Increment the vertex count so the next primitive doesn't
912 * overwrite the last vertex which we just added.
913 */
914 exec->vtx.vert_count++;
915 exec->vtx.buffer_ptr += exec->vtx.vertex_size;
916 }
917
918 try_vbo_merge(exec);
919 }
920
921 ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;
922
923 if (exec->vtx.prim_count == VBO_MAX_PRIM)
924 vbo_exec_vtx_flush(exec);
925
926 if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
927 _mesa_flush(ctx);
928 }
929 }
930
931
932 /**
933 * Called via glPrimitiveRestartNV()
934 */
935 static void GLAPIENTRY
936 vbo_exec_PrimitiveRestartNV(void)
937 {
938 GLenum curPrim;
939 GET_CURRENT_CONTEXT(ctx);
940
941 curPrim = ctx->Driver.CurrentExecPrimitive;
942
943 if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
944 _mesa_error(ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV");
945 }
946 else {
947 vbo_exec_End();
948 vbo_exec_Begin(curPrim);
949 }
950 }
951
952
953 static void
954 vbo_exec_vtxfmt_init(struct vbo_exec_context *exec)
955 {
956 struct gl_context *ctx = exec->ctx;
957 GLvertexformat *vfmt = &exec->vtxfmt;
958
959 #define NAME_AE(x) _ae_##x
960 #define NAME_CALLLIST(x) _mesa_##x
961 #define NAME(x) vbo_exec_##x
962 #define NAME_ES(x) _es_##x
963
964 #include "vbo_init_tmp.h"
965 }
966
967
968 static void
969 vbo_reset_all_attr(struct vbo_exec_context *exec)
970 {
971 while (exec->vtx.enabled) {
972 const int i = u_bit_scan64(&exec->vtx.enabled);
973
974 /* Reset the vertex attribute by setting its size to zero. */
975 exec->vtx.attr[i].size = 0;
976 exec->vtx.attr[i].type = GL_FLOAT;
977 exec->vtx.attr[i].active_size = 0;
978 exec->vtx.attrptr[i] = NULL;
979 }
980
981 exec->vtx.vertex_size = 0;
982 }
983
984
985 void
986 vbo_exec_vtx_init(struct vbo_exec_context *exec, bool use_buffer_objects)
987 {
988 struct gl_context *ctx = exec->ctx;
989
990 if (use_buffer_objects) {
991 /* Use buffer objects for immediate mode. */
992 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
993
994 exec->vtx.bufferobj = ctx->Driver.NewBufferObject(ctx, IMM_BUFFER_NAME);
995
996 /* Map the buffer. */
997 vbo_exec_vtx_map(exec);
998 assert(exec->vtx.buffer_ptr);
999 } else {
1000 /* Use allocated memory for immediate mode. */
1001 _mesa_reference_buffer_object(ctx,
1002 &exec->vtx.bufferobj,
1003 ctx->Shared->NullBufferObj);
1004
1005 exec->vtx.buffer_map = _mesa_align_malloc(VBO_VERT_BUFFER_SIZE, 64);
1006 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
1007 }
1008
1009 vbo_exec_vtxfmt_init(exec);
1010 _mesa_noop_vtxfmt_init(ctx, &exec->vtxfmt_noop);
1011
1012 exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
1013 vbo_reset_all_attr(exec);
1014 }
1015
1016
1017 void
1018 vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
1019 {
1020 /* using a real VBO for vertex data */
1021 struct gl_context *ctx = exec->ctx;
1022
1023 /* True VBOs should already be unmapped
1024 */
1025 if (exec->vtx.buffer_map) {
1026 assert(exec->vtx.bufferobj->Name == 0 ||
1027 exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
1028 if (exec->vtx.bufferobj->Name == 0) {
1029 _mesa_align_free(exec->vtx.buffer_map);
1030 exec->vtx.buffer_map = NULL;
1031 exec->vtx.buffer_ptr = NULL;
1032 }
1033 }
1034
1035 /* Free the vertex buffer. Unmap first if needed.
1036 */
1037 if (_mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
1038 ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
1039 }
1040 _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
1041 }
1042
1043
1044 /**
1045 * If called inside glBegin()/glEnd(), this is a no-op. Otherwise, if the
1046 * FLUSH_STORED_VERTICES bit in \p flags is set, flush (draw) any buffered
1047 * vertices; if the FLUSH_UPDATE_CURRENT bit is set, update
1048 * __struct gl_contextRec::Current and gl_light_attrib::Material.
1049 *
1050 * Note that the default T&L engine never clears the
1051 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
1052 *
1053 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
1054 */
1055 void
1056 vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags)
1057 {
1058 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
1059
1060 #ifndef NDEBUG
1061 /* debug check: make sure we don't get called recursively */
1062 exec->flush_call_depth++;
1063 assert(exec->flush_call_depth == 1);
1064 #endif
1065
1066 if (_mesa_inside_begin_end(ctx)) {
1067 /* We've had glBegin but not glEnd! */
1068 #ifndef NDEBUG
1069 exec->flush_call_depth--;
1070 assert(exec->flush_call_depth == 0);
1071 #endif
1072 return;
1073 }
1074
1075 /* Flush (draw). */
1076 vbo_exec_FlushVertices_internal(exec, flags);
1077
1078 #ifndef NDEBUG
1079 exec->flush_call_depth--;
1080 assert(exec->flush_call_depth == 0);
1081 #endif
1082 }
1083
1084
1085 void GLAPIENTRY
1086 _es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
1087 {
1088 vbo_exec_Color4f(r, g, b, a);
1089 }
1090
1091
1092 void GLAPIENTRY
1093 _es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
1094 {
1095 vbo_exec_Normal3f(x, y, z);
1096 }
1097
1098
1099 void GLAPIENTRY
1100 _es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
1101 {
1102 vbo_exec_MultiTexCoord4f(target, s, t, r, q);
1103 }
1104
1105
1106 void GLAPIENTRY
1107 _es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
1108 {
1109 vbo_exec_Materialfv(face, pname, params);
1110 }
1111
1112
1113 void GLAPIENTRY
1114 _es_Materialf(GLenum face, GLenum pname, GLfloat param)
1115 {
1116 GLfloat p[4];
1117 p[0] = param;
1118 p[1] = p[2] = p[3] = 0.0F;
1119 vbo_exec_Materialfv(face, pname, p);
1120 }
1121
1122
1123 /**
1124 * A special version of glVertexAttrib4f that does not treat index 0 as
1125 * VBO_ATTRIB_POS.
1126 */
1127 static void
1128 VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1129 {
1130 GET_CURRENT_CONTEXT(ctx);
1131 if (index < MAX_VERTEX_GENERIC_ATTRIBS)
1132 ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
1133 else
1134 ERROR(GL_INVALID_VALUE);
1135 }
1136
1137 void GLAPIENTRY
1138 _es_VertexAttrib4f(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1139 {
1140 VertexAttrib4f_nopos(index, x, y, z, w);
1141 }
1142
1143
1144 void GLAPIENTRY
1145 _es_VertexAttrib1f(GLuint indx, GLfloat x)
1146 {
1147 VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
1148 }
1149
1150
1151 void GLAPIENTRY
1152 _es_VertexAttrib1fv(GLuint indx, const GLfloat* values)
1153 {
1154 VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
1155 }
1156
1157
1158 void GLAPIENTRY
1159 _es_VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y)
1160 {
1161 VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
1162 }
1163
1164
1165 void GLAPIENTRY
1166 _es_VertexAttrib2fv(GLuint indx, const GLfloat* values)
1167 {
1168 VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
1169 }
1170
1171
1172 void GLAPIENTRY
1173 _es_VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
1174 {
1175 VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
1176 }
1177
1178
1179 void GLAPIENTRY
1180 _es_VertexAttrib3fv(GLuint indx, const GLfloat* values)
1181 {
1182 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
1183 }
1184
1185
1186 void GLAPIENTRY
1187 _es_VertexAttrib4fv(GLuint indx, const GLfloat* values)
1188 {
1189 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
1190 }