00953cc28738c4be26e9951009cac4a84aca4f23
[mesa.git] / src / mesa / vbo / vbo_exec_api.c
1 /**************************************************************************
2
3 Copyright 2002-2008 VMware, Inc.
4
5 All Rights Reserved.
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 on the rights to use, copy, modify, merge, publish, distribute, sub
11 license, and/or sell copies of the Software, and to permit persons to whom
12 the Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice (including the next
15 paragraph) shall be included in all copies or substantial portions of the
16 Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **************************************************************************/
27
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33 #include "main/glheader.h"
34 #include "main/bufferobj.h"
35 #include "main/context.h"
36 #include "main/macros.h"
37 #include "main/vtxfmt.h"
38 #include "main/dlist.h"
39 #include "main/eval.h"
40 #include "main/state.h"
41 #include "main/light.h"
42 #include "main/api_arrayelt.h"
43 #include "main/draw_validate.h"
44 #include "main/dispatch.h"
45 #include "util/bitscan.h"
46
47 #include "vbo_noop.h"
48 #include "vbo_private.h"
49
50
51 /** ID/name for immediate-mode VBO */
52 #define IMM_BUFFER_NAME 0xaabbccdd
53
54
55 static void
56 vbo_reset_all_attr(struct vbo_exec_context *exec);
57
58
/**
 * Close off the last primitive, execute the buffer, restart the
 * primitive.  This is called when we fill a vertex buffer before
 * hitting glEnd.
 */
static void
vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
{
   if (exec->vtx.prim_count == 0) {
      /* Nothing buffered: just rewind to the start of the vertex store. */
      exec->vtx.copied.nr = 0;
      exec->vtx.vert_count = 0;
      exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   }
   else {
      struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
      const GLuint last_begin = last_prim->begin;
      GLuint last_count;

      if (_mesa_inside_begin_end(exec->ctx)) {
         /* Close the still-open primitive at the current vertex. */
         last_prim->count = exec->vtx.vert_count - last_prim->start;
      }

      last_count = last_prim->count;

      /* Special handling for wrapping GL_LINE_LOOP */
      if (last_prim->mode == GL_LINE_LOOP &&
          last_count > 0 &&
          !last_prim->end) {
         /* draw this section of the incomplete line loop as a line strip */
         last_prim->mode = GL_LINE_STRIP;
         if (!last_prim->begin) {
            /* This is not the first section of the line loop, so don't
             * draw the 0th vertex.  We're saving it until we draw the
             * very last section of the loop.
             */
            last_prim->start++;
            last_prim->count--;
         }
      }

      /* Execute the buffer and save copied vertices.
       */
      if (exec->vtx.vert_count)
         vbo_exec_vtx_flush(exec);
      else {
         exec->vtx.prim_count = 0;
         exec->vtx.copied.nr = 0;
      }

      /* Emit a glBegin to start the new list.
       */
      assert(exec->vtx.prim_count == 0);

      if (_mesa_inside_begin_end(exec->ctx)) {
         /* Re-open the interrupted primitive at the start of the new buffer. */
         exec->vtx.prim[0].mode = exec->ctx->Driver.CurrentExecPrimitive;
         exec->vtx.prim[0].begin = 0;
         exec->vtx.prim[0].end = 0;
         exec->vtx.prim[0].start = 0;
         exec->vtx.prim[0].count = 0;
         exec->vtx.prim_count++;

         /* If every vertex of the old section was carried over, this new
          * section still effectively contains the original glBegin.
          */
         if (exec->vtx.copied.nr == last_count)
            exec->vtx.prim[0].begin = last_begin;
      }
   }
}
125
126
/**
 * Deal with buffer wrapping where provoked by the vertex buffer
 * filling up, as opposed to upgrade_vertex().
 */
static void
vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
{
   unsigned numComponents;

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (!exec->vtx.buffer_ptr) {
      /* probably ran out of memory earlier when allocating the VBO */
      return;
   }

   /* Copy stored vertices to start of new list.
    */
   assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);

   /* numComponents counts 32-bit words (fi_type), not vertices. */
   numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
   memcpy(exec->vtx.buffer_ptr,
          exec->vtx.copied.buffer,
          numComponents * sizeof(fi_type));
   exec->vtx.buffer_ptr += numComponents;
   exec->vtx.vert_count += exec->vtx.copied.nr;

   exec->vtx.copied.nr = 0;
}
159
160
/**
 * Copy the active vertex's values to the ctx->Current fields.
 */
static void
vbo_exec_copy_to_current(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   /* All enabled attributes except position; POS has no Current slot. */
   GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));

   while (enabled) {
      const int i = u_bit_scan64(&enabled);

      /* Note: the vbo->current[i].Ptr pointers point into the
       * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
       */
      GLfloat *current = (GLfloat *)vbo->current[i].Ptr;
      fi_type tmp[8]; /* space for doubles */
      int dmul = 1;

      /* 64-bit component types occupy two 32-bit words each. */
      if (exec->vtx.attr[i].type == GL_DOUBLE ||
          exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB)
         dmul = 2;

      assert(exec->vtx.attr[i].size);

      if (exec->vtx.attr[i].type == GL_DOUBLE ||
          exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB) {
         /* Raw copy; zero-fill the unused tail of tmp. */
         memset(tmp, 0, sizeof(tmp));
         memcpy(tmp, exec->vtx.attrptr[i], exec->vtx.attr[i].size * sizeof(GLfloat));
      } else {
         /* Expand to 4 components, cleaning missing ones to (0,0,0,1). */
         COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
                                     exec->vtx.attr[i].size,
                                     exec->vtx.attrptr[i],
                                     exec->vtx.attr[i].type);
      }

      /* Only touch Current (and NewState) if something actually changed. */
      if (exec->vtx.attr[i].type != vbo->current[i].Format.Type ||
          memcmp(current, tmp, 4 * sizeof(GLfloat) * dmul) != 0) {
         memcpy(current, tmp, 4 * sizeof(GLfloat) * dmul);

         /* Given that we explicitly state size here, there is no need
          * for the COPY_CLEAN above, could just copy 16 bytes and be
          * done.  The only problem is when Mesa accesses ctx->Current
          * directly.
          */
         /* Size here is in components - not bytes */
         vbo_set_vertex_format(&vbo->current[i].Format,
                               exec->vtx.attr[i].size / dmul,
                               exec->vtx.attr[i].type);

         /* This triggers rather too much recalculation of Mesa state
          * that doesn't get used (eg light positions).
          */
         if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT &&
             i <= VBO_ATTRIB_MAT_BACK_INDEXES)
            ctx->NewState |= _NEW_LIGHT;

         ctx->NewState |= _NEW_CURRENT_ATTRIB;
      }
   }

   /* Colormaterial -- this kindof sucks.
    */
   if (ctx->Light.ColorMaterialEnabled &&
       exec->vtx.attr[VBO_ATTRIB_COLOR0].size) {
      _mesa_update_color_material(ctx,
                                  ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
   }
}
231
232
/**
 * Flush existing data, set new attrib size, replay copied vertices.
 * This is called when we transition from a small vertex attribute size
 * to a larger one.  Ex: glTexCoord2f -> glTexCoord4f.
 * We need to go back over the previous 2-component texcoords and insert
 * zero and one values.
 * \param attr  VBO_ATTRIB_x vertex attribute value
 * \param newSize  new size of the attribute, in 32-bit words
 */
static void
vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
                             GLuint attr, GLuint newSize)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   const GLint lastcount = exec->vtx.vert_count;
   fi_type *old_attrptr[VBO_ATTRIB_MAX];
   const GLuint old_vtx_size_no_pos = exec->vtx.vertex_size_no_pos;
   const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
   const GLuint oldSize = exec->vtx.attr[attr].size;
   GLuint i;

   assert(attr < VBO_ATTRIB_MAX);

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (unlikely(exec->vtx.copied.nr)) {
      /* We're in the middle of a primitive, keep the old vertex
       * format around to be able to translate the copied vertices to
       * the new format.
       */
      memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
   }

   /* Heuristic: Attempt to isolate attributes received outside
    * begin/end so that they don't bloat the vertices.
    */
   if (!_mesa_inside_begin_end(ctx) &&
       !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
      vbo_exec_copy_to_current(exec);
      vbo_reset_all_attr(exec);
   }

   /* Fix up sizes:
    */
   exec->vtx.attr[attr].size = newSize;
   exec->vtx.vertex_size += newSize - oldSize;
   exec->vtx.vertex_size_no_pos = exec->vtx.vertex_size - exec->vtx.attr[0].size;
   exec->vtx.max_vert = vbo_compute_max_verts(exec);
   exec->vtx.vert_count = 0;
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   exec->vtx.enabled |= BITFIELD64_BIT(attr);

   if (attr != 0) {
      if (unlikely(oldSize)) {
         /* Resizing an attribute already present in the layout. */
         unsigned offset = exec->vtx.attrptr[attr] - exec->vtx.vertex;

         /* If there are attribs after the resized attrib... */
         if (offset + oldSize < old_vtx_size_no_pos) {
            int size_diff = newSize - oldSize;
            fi_type *old_first = exec->vtx.attrptr[attr] + oldSize;
            fi_type *new_first = exec->vtx.attrptr[attr] + newSize;
            fi_type *old_last = exec->vtx.vertex + old_vtx_size_no_pos - 1;
            fi_type *new_last = exec->vtx.vertex + exec->vtx.vertex_size_no_pos - 1;

            if (size_diff < 0) {
               /* Decreasing the size: Copy from first to last to move
                * elements to the left.
                */
               fi_type *old_end = old_last + 1;
               fi_type *old = old_first;
               fi_type *new = new_first;

               do {
                  *new++ = *old++;
               } while (old != old_end);
            } else {
               /* Increasing the size: Copy from last to first to move
                * elements to the right.
                */
               fi_type *old_end = old_first - 1;
               fi_type *old = old_last;
               fi_type *new = new_last;

               do {
                  *new-- = *old--;
               } while (old != old_end);
            }

            /* Update pointers to attribs, because we moved them. */
            GLbitfield64 enabled = exec->vtx.enabled &
                                   ~BITFIELD64_BIT(VBO_ATTRIB_POS) &
                                   ~BITFIELD64_BIT(attr);
            while (enabled) {
               unsigned i = u_bit_scan64(&enabled);

               if (exec->vtx.attrptr[i] > exec->vtx.attrptr[attr])
                  exec->vtx.attrptr[i] += size_diff;
            }
         }
      } else {
         /* Just have to append the new attribute at the end */
         exec->vtx.attrptr[attr] = exec->vtx.vertex +
            exec->vtx.vertex_size_no_pos - newSize;
      }
   }

   /* The position is always last. */
   exec->vtx.attrptr[0] = exec->vtx.vertex + exec->vtx.vertex_size_no_pos;

   /* Replay stored vertices to translate them
    * to new format here.
    *
    * -- No need to replay - just copy piecewise
    */
   if (unlikely(exec->vtx.copied.nr)) {
      fi_type *data = exec->vtx.copied.buffer;
      fi_type *dest = exec->vtx.buffer_ptr;

      assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);

      for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
         GLbitfield64 enabled = exec->vtx.enabled;
         while (enabled) {
            const int j = u_bit_scan64(&enabled);
            GLuint sz = exec->vtx.attr[j].size;
            GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
            GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;

            assert(sz);

            if (j == attr) {
               if (oldSize) {
                  /* Pad the old (smaller) value out to the new size. */
                  fi_type tmp[4];
                  COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
                                              data + old_offset,
                                              exec->vtx.attr[j].type);
                  COPY_SZ_4V(dest + new_offset, newSize, tmp);
               } else {
                  /* Attribute was absent before: take the current value. */
                  fi_type *current = (fi_type *)vbo->current[j].Ptr;
                  COPY_SZ_4V(dest + new_offset, sz, current);
               }
            }
            else {
               COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
            }
         }

         data += old_vtx_size;
         dest += exec->vtx.vertex_size;
      }

      exec->vtx.buffer_ptr = dest;
      exec->vtx.vert_count += exec->vtx.copied.nr;
      exec->vtx.copied.nr = 0;
   }
}
392
393
/**
 * This is when a vertex attribute transitions to a different size.
 * For example, we saw a bunch of glTexCoord2f() calls and now we got a
 * glTexCoord4f() call.  We promote the array from size=2 to size=4.
 * \param attr  VBO_ATTRIB_x vertex attribute value
 * \param newSize  new attribute size, in 32-bit words (doubles count 2x)
 * \param newType  GL type of the incoming data (GL_FLOAT, GL_DOUBLE, ...)
 */
static void
vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
                      GLuint newSize, GLenum newType)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   assert(attr < VBO_ATTRIB_MAX);

   if (newSize > exec->vtx.attr[attr].size ||
       newType != exec->vtx.attr[attr].type) {
      /* New size is larger.  Need to flush existing vertices and get
       * an enlarged vertex format.
       */
      vbo_exec_wrap_upgrade_vertex(exec, attr, newSize);
   }
   else if (newSize < exec->vtx.attr[attr].active_size) {
      GLuint i;
      /* Default values for the trailing components, e.g. (0,0,0,1). */
      const fi_type *id =
         vbo_get_default_vals_as_union(exec->vtx.attr[attr].type);

      /* New size is smaller - just need to fill in some
       * zeros.  Don't need to flush or wrap.
       */
      for (i = newSize; i <= exec->vtx.attr[attr].size; i++)
         exec->vtx.attrptr[attr][i-1] = id[i-1];
   }

   exec->vtx.attr[attr].active_size = newSize;
   exec->vtx.attr[attr].type = newType;

   /* Does setting NeedFlush belong here?  Necessitates resetting
    * vtxfmt on each flush (otherwise flags won't get reset
    * afterwards).
    */
   if (attr == 0)
      ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
}
438
439
440 /**
441 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
442 * It depends on a few things, including whether we're inside or outside
443 * of glBegin/glEnd.
444 */
445 static inline bool
446 is_vertex_position(const struct gl_context *ctx, GLuint index)
447 {
448 return (index == 0 &&
449 _mesa_attr_zero_aliases_vertex(ctx) &&
450 _mesa_inside_begin_end(ctx));
451 }
452
/* Write a 64-bit value into a 32-bit pointer by preserving endianness.
 * dst32 is advanced past the two 32-bit words that are written, so the
 * memory layout matches a native 64-bit store on either endianness.
 * Used for GL_DOUBLE / 64-bit integer attribute components whose
 * destination is only 4-byte aligned.
 */
#if UTIL_ARCH_LITTLE_ENDIAN
#define SET_64BIT(dst32, u64) do { \
      *(dst32)++ = (u64); \
      *(dst32)++ = (uint64_t)(u64) >> 32; \
   } while (0)
#else
#define SET_64BIT(dst32, u64) do { \
      *(dst32)++ = (uint64_t)(u64) >> 32; \
      *(dst32)++ = (u64); \
   } while (0)
#endif
465
466
/**
 * This macro is used to implement all the glVertex, glColor, glTexCoord,
 * glVertexAttrib, etc functions.
 * \param A  VBO_ATTRIB_x attribute index
 * \param N  attribute size (1..4), in components of type C
 * \param T  type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
 * \param C  cast type (uint32_t or uint64_t)
 * \param V0, V1, V2, V3  attribute value
 */
#define ATTR_UNION(A, N, T, C, V0, V1, V2, V3) \
do { \
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec; \
   int sz = (sizeof(C) / sizeof(GLfloat)); \
 \
   assert(sz == 1 || sz == 2); \
 \
   /* check if attribute size or type is changing */ \
   if (unlikely(exec->vtx.attr[A].active_size != N * sz) || \
       unlikely(exec->vtx.attr[A].type != T)) { \
      vbo_exec_fixup_vertex(ctx, A, N * sz, T); \
   } \
 \
   /* store a copy of the attribute in exec except for glVertex */ \
   if ((A) != 0) { \
      C *dest = (C *)exec->vtx.attrptr[A]; \
      if (N>0) dest[0] = V0; \
      if (N>1) dest[1] = V1; \
      if (N>2) dest[2] = V2; \
      if (N>3) dest[3] = V3; \
      assert(exec->vtx.attr[A].type == T); \
   } \
 \
   if ((A) == 0) { \
      /* This is a glVertex call */ \
      uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr; \
      uint32_t *src = (uint32_t *)exec->vtx.vertex; \
      unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos; \
 \
      /* Copy over attributes from exec. */ \
      for (unsigned i = 0; i < vertex_size_no_pos; i++) \
         *dst++ = *src++; \
 \
      /* Store the position, which is always last and can have 32 or */ \
      /* 64 bits per channel. */ \
      if (sizeof(C) == 4) { \
         if (N > 0) *dst++ = V0; \
         if (N > 1) *dst++ = V1; \
         if (N > 2) *dst++ = V2; \
         if (N > 3) *dst++ = V3; \
      } else { \
         /* 64 bits: dst can be unaligned, so copy each 4-byte word */ \
         /* separately */ \
         if (N > 0) SET_64BIT(dst, V0); \
         if (N > 1) SET_64BIT(dst, V1); \
         if (N > 2) SET_64BIT(dst, V2); \
         if (N > 3) SET_64BIT(dst, V3); \
      } \
 \
      /* dst now points at the beginning of the next vertex */ \
      exec->vtx.buffer_ptr = (fi_type*)dst; \
 \
      /* Set FLUSH_STORED_VERTICES to indicate that there's now */ \
      /* something to draw (not just updating a color or texcoord).*/ \
      /* Don't set FLUSH_UPDATE_CURRENT because */ \
      /* Current.Attrib[VBO_ATTRIB_POS] is never used. */ \
      ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES; \
 \
      if (++exec->vtx.vert_count >= exec->vtx.max_vert) \
         vbo_exec_vtx_wrap(exec); \
   } else { \
      /* we now have accumulated per-vertex attributes */ \
      ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT; \
   } \
} while (0)
541
542
543 #undef ERROR
544 #define ERROR(err) _mesa_error(ctx, err, __func__)
545 #define TAG(x) vbo_exec_##x
546
547 #include "vbo_attrib_tmp.h"
548
549
550
/**
 * Execute a glMaterial call.  Note that if GL_COLOR_MATERIAL is enabled,
 * this may be a (partial) no-op.
 */
static void GLAPIENTRY
vbo_exec_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   GLbitfield updateMats;
   GET_CURRENT_CONTEXT(ctx);

   /* This function should be a no-op when it tries to update material
    * attributes which are currently tracking glColor via glColorMaterial.
    * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
    * indicating which material attributes can actually be updated below.
    */
   if (ctx->Light.ColorMaterialEnabled) {
      updateMats = ~ctx->Light._ColorMaterialBitmask;
   }
   else {
      /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
      updateMats = ALL_MATERIAL_BITS;
   }

   /* Restrict the mask to the requested face(s); GL_FRONT / GL_BACK are
    * only legal faces in the compatibility profile.
    */
   if (ctx->API == API_OPENGL_COMPAT && face == GL_FRONT) {
      updateMats &= FRONT_MATERIAL_BITS;
   }
   else if (ctx->API == API_OPENGL_COMPAT && face == GL_BACK) {
      updateMats &= BACK_MATERIAL_BITS;
   }
   else if (face != GL_FRONT_AND_BACK) {
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
      return;
   }

   switch (pname) {
   case GL_EMISSION:
      if (updateMats & MAT_BIT_FRONT_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
      if (updateMats & MAT_BIT_BACK_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
      break;
   case GL_AMBIENT:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      break;
   case GL_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   case GL_SPECULAR:
      if (updateMats & MAT_BIT_FRONT_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
      if (updateMats & MAT_BIT_BACK_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
      break;
   case GL_SHININESS:
      /* Shininess is range-checked against the implementation limit. */
      if (*params < 0 || *params > ctx->Const.MaxShininess) {
         _mesa_error(ctx, GL_INVALID_VALUE,
                     "glMaterial(invalid shininess: %f out range [0, %f])",
                     *params, ctx->Const.MaxShininess);
         return;
      }
      if (updateMats & MAT_BIT_FRONT_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
      if (updateMats & MAT_BIT_BACK_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
      break;
   case GL_COLOR_INDEXES:
      /* Color-index materials only exist in the compatibility profile. */
      if (ctx->API != API_OPENGL_COMPAT) {
         _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
         return;
      }
      if (updateMats & MAT_BIT_FRONT_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
      if (updateMats & MAT_BIT_BACK_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
      break;
   case GL_AMBIENT_AND_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   default:
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
      return;
   }
}
647
648
/**
 * Flush (draw) vertices.
 */
static void
vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec)
{
   /* Draw any buffered vertices first. */
   if (exec->vtx.vert_count) {
      vbo_exec_vtx_flush(exec);
   }

   /* If a vertex format is active, propagate the latest attribute values
    * to ctx->Current and reset all attributes to their empty state.
    */
   if (exec->vtx.vertex_size) {
      vbo_exec_copy_to_current(exec);
      vbo_reset_all_attr(exec);
   }
}
664
665
/** Called via glEvalCoord1f. */
static void GLAPIENTRY
vbo_exec_EvalCoord1f(GLfloat u)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure every attribute produced by an enabled 1D map has the
       * size the map will write, growing the vertex layout if needed.
       */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map1[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map1[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
      }
   }

   /* Save the current vertex attribute values; evaluation overwrites
    * them while emitting the evaluated vertex.
    */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord1f(exec, u);

   /* Restore the saved attribute values. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
692
693
/** Called via glEvalCoord2f. */
static void GLAPIENTRY
vbo_exec_EvalCoord2f(GLfloat u, GLfloat v)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure every attribute produced by an enabled 2D map has the
       * size the map will write, growing the vertex layout if needed.
       */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map2[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map2[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
      }

      /* GL_AUTO_NORMAL generates a 3-component normal. */
      if (ctx->Eval.AutoNormal)
         if (exec->vtx.attr[VBO_ATTRIB_NORMAL].active_size != 3)
            vbo_exec_fixup_vertex(ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT);
   }

   /* Save the current vertex attribute values; evaluation overwrites
    * them while emitting the evaluated vertex.
    */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord2f(exec, u, v);

   /* Restore the saved attribute values. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
724
725
726 static void GLAPIENTRY
727 vbo_exec_EvalCoord1fv(const GLfloat *u)
728 {
729 vbo_exec_EvalCoord1f(u[0]);
730 }
731
732
733 static void GLAPIENTRY
734 vbo_exec_EvalCoord2fv(const GLfloat *u)
735 {
736 vbo_exec_EvalCoord2f(u[0], u[1]);
737 }
738
739
740 static void GLAPIENTRY
741 vbo_exec_EvalPoint1(GLint i)
742 {
743 GET_CURRENT_CONTEXT(ctx);
744 GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
745 (GLfloat) ctx->Eval.MapGrid1un);
746 GLfloat u = i * du + ctx->Eval.MapGrid1u1;
747
748 vbo_exec_EvalCoord1f(u);
749 }
750
751
752 static void GLAPIENTRY
753 vbo_exec_EvalPoint2(GLint i, GLint j)
754 {
755 GET_CURRENT_CONTEXT(ctx);
756 GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
757 (GLfloat) ctx->Eval.MapGrid2un);
758 GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
759 (GLfloat) ctx->Eval.MapGrid2vn);
760 GLfloat u = i * du + ctx->Eval.MapGrid2u1;
761 GLfloat v = j * dv + ctx->Eval.MapGrid2v1;
762
763 vbo_exec_EvalCoord2f(u, v);
764 }
765
766
/**
 * Called via glBegin.
 */
static void GLAPIENTRY
vbo_exec_Begin(GLenum mode)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_context *vbo = vbo_context(ctx);
   struct vbo_exec_context *exec = &vbo->exec;
   int i;

   /* Nested glBegin is an error. */
   if (_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
      return;
   }

   if (!_mesa_valid_prim_mode(ctx, mode, "glBegin")) {
      return;
   }

   if (!_mesa_valid_to_render(ctx, "glBegin")) {
      return;
   }

   /* Heuristic: attempt to isolate attributes occurring outside
    * begin/end pairs.
    */
   if (exec->vtx.vertex_size && !exec->vtx.attr[VBO_ATTRIB_POS].size)
      vbo_exec_FlushVertices_internal(exec);

   /* Open a new primitive record starting at the current vertex. */
   i = exec->vtx.prim_count++;
   exec->vtx.prim[i].mode = mode;
   exec->vtx.prim[i].begin = 1;
   exec->vtx.prim[i].end = 0;
   exec->vtx.prim[i].indexed = 0;
   exec->vtx.prim[i].pad = 0;
   exec->vtx.prim[i].start = exec->vtx.vert_count;
   exec->vtx.prim[i].count = 0;
   exec->vtx.prim[i].num_instances = 1;
   exec->vtx.prim[i].base_instance = 0;
   exec->vtx.prim[i].is_indirect = 0;

   ctx->Driver.CurrentExecPrimitive = mode;

   /* Switch to the restricted begin/end dispatch table. */
   ctx->Exec = ctx->BeginEnd;

   /* We may have been called from a display list, in which case we should
    * leave dlist.c's dispatch table in place.
    */
   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      /* Threaded marshalling: only the server-side dispatch changes. */
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->OutsideBeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   } else {
      assert(ctx->CurrentClientDispatch == ctx->Save);
   }
}
825
826
827 /**
828 * Try to merge / concatenate the two most recent VBO primitives.
829 */
830 static void
831 try_vbo_merge(struct vbo_exec_context *exec)
832 {
833 struct _mesa_prim *cur = &exec->vtx.prim[exec->vtx.prim_count - 1];
834
835 assert(exec->vtx.prim_count >= 1);
836
837 vbo_try_prim_conversion(cur);
838
839 if (exec->vtx.prim_count >= 2) {
840 struct _mesa_prim *prev = &exec->vtx.prim[exec->vtx.prim_count - 2];
841 assert(prev == cur - 1);
842
843 if (vbo_can_merge_prims(prev, cur)) {
844 assert(cur->begin);
845 assert(cur->end);
846 assert(prev->begin);
847 assert(prev->end);
848 vbo_merge_prims(prev, cur);
849 exec->vtx.prim_count--; /* drop the last primitive */
850 }
851 }
852 }
853
854
/**
 * Called via glEnd.
 */
static void GLAPIENTRY
vbo_exec_End(void)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   /* glEnd without a matching glBegin is an error. */
   if (!_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
      return;
   }

   /* Restore the full (outside begin/end) dispatch table. */
   ctx->Exec = ctx->OutsideBeginEnd;

   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->BeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   }

   if (exec->vtx.prim_count > 0) {
      /* close off current primitive */
      struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];

      last_prim->end = 1;
      last_prim->count = exec->vtx.vert_count - last_prim->start;

      /* Special handling for GL_LINE_LOOP */
      if (last_prim->mode == GL_LINE_LOOP && last_prim->begin == 0) {
         /* We're finishing drawing a line loop.  Append 0th vertex onto
          * end of vertex buffer so we can draw it as a line strip.
          */
         const fi_type *src = exec->vtx.buffer_map +
            last_prim->start * exec->vtx.vertex_size;
         fi_type *dst = exec->vtx.buffer_map +
            exec->vtx.vert_count * exec->vtx.vertex_size;

         /* copy 0th vertex to end of buffer */
         memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));

         last_prim->start++;  /* skip vertex0 */
         /* note that last_prim->count stays unchanged */
         last_prim->mode = GL_LINE_STRIP;

         /* Increment the vertex count so the next primitive doesn't
          * overwrite the last vertex which we just added.
          */
         exec->vtx.vert_count++;
         exec->vtx.buffer_ptr += exec->vtx.vertex_size;
      }

      try_vbo_merge(exec);
   }

   ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;

   /* Flush now if the primitive array is full. */
   if (exec->vtx.prim_count == VBO_MAX_PRIM)
      vbo_exec_vtx_flush(exec);

   if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
      _mesa_flush(ctx);
   }
}
921
922
923 /**
924 * Called via glPrimitiveRestartNV()
925 */
926 static void GLAPIENTRY
927 vbo_exec_PrimitiveRestartNV(void)
928 {
929 GLenum curPrim;
930 GET_CURRENT_CONTEXT(ctx);
931
932 curPrim = ctx->Driver.CurrentExecPrimitive;
933
934 if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
935 _mesa_error(ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV");
936 }
937 else {
938 vbo_exec_End();
939 vbo_exec_Begin(curPrim);
940 }
941 }
942
943
/* Populate exec->vtxfmt (the immediate-mode vertex format dispatch table).
 * The NAME* macros select which function families vbo_init_tmp.h wires up:
 * array-element (_ae_*), display-list (_mesa_*), the vbo_exec_* entries
 * defined above via vbo_attrib_tmp.h, and the GLES wrappers (_es_*).
 */
static void
vbo_exec_vtxfmt_init(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   GLvertexformat *vfmt = &exec->vtxfmt;

#define NAME_AE(x) _ae_##x
#define NAME_CALLLIST(x) _mesa_##x
#define NAME(x) vbo_exec_##x
#define NAME_ES(x) _es_##x

#include "vbo_init_tmp.h"
}
957
958
/**
 * Tell the VBO module to use a real OpenGL vertex buffer object to
 * store accumulated immediate-mode vertex data.
 * This replaces the malloced buffer which was created in
 * vbo_exec_vtx_init() below.
 */
void
vbo_use_buffer_objects(struct gl_context *ctx)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
   /* Any buffer name but 0 can be used here since this bufferobj won't
    * go into the bufferobj hashtable.
    */
   GLuint bufName = IMM_BUFFER_NAME;

   /* Make sure this func is only used once */
   assert(exec->vtx.bufferobj == ctx->Shared->NullBufferObj);

   /* Release the malloced buffer set up by vbo_exec_vtx_init(). */
   _mesa_align_free(exec->vtx.buffer_map);
   exec->vtx.buffer_map = NULL;
   exec->vtx.buffer_ptr = NULL;

   /* Allocate a real buffer object now */
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
   exec->vtx.bufferobj = ctx->Driver.NewBufferObject(ctx, bufName);

   /* Map the buffer. */
   vbo_exec_vtx_map(exec);
   assert(exec->vtx.buffer_ptr);
}
989
990
/* One-time initialization of the immediate-mode vertex machinery:
 * sets up the vertex store (malloced until vbo_use_buffer_objects() is
 * called), the dispatch tables and an empty attribute layout.
 */
void
vbo_exec_vtx_init(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   GLuint i;

   /* Allocate a buffer object.  Will just reuse this object
    * continuously, unless vbo_use_buffer_objects() is called to enable
    * use of real VBOs.
    */
   _mesa_reference_buffer_object(ctx,
                                 &exec->vtx.bufferobj,
                                 ctx->Shared->NullBufferObj);

   assert(!exec->vtx.buffer_map);
   exec->vtx.buffer_map = _mesa_align_malloc(VBO_VERT_BUFFER_SIZE, 64);
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;

   vbo_exec_vtxfmt_init(exec);
   _mesa_noop_vtxfmt_init(ctx, &exec->vtxfmt_noop);

   /* Start with no attributes in the vertex layout. */
   exec->vtx.enabled = 0;
   for (i = 0 ; i < ARRAY_SIZE(exec->vtx.attr); i++) {
      exec->vtx.attr[i].size = 0;
      exec->vtx.attr[i].type = GL_FLOAT;
      exec->vtx.attr[i].active_size = 0;
   }

   exec->vtx.vertex_size = 0;
}
1021
1022
/* Tear down the immediate-mode vertex store: free the malloced buffer
 * (if still in use) and release the buffer object reference.
 */
void
vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
{
   /* using a real VBO for vertex data */
   struct gl_context *ctx = exec->ctx;

   /* True VBOs should already be unmapped
    */
   if (exec->vtx.buffer_map) {
      assert(exec->vtx.bufferobj->Name == 0 ||
             exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
      if (exec->vtx.bufferobj->Name == 0) {
         /* Name 0 means the malloced buffer from vbo_exec_vtx_init(). */
         _mesa_align_free(exec->vtx.buffer_map);
         exec->vtx.buffer_map = NULL;
         exec->vtx.buffer_ptr = NULL;
      }
   }

   /* Free the vertex buffer.  Unmap first if needed.
    */
   if (_mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
      ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
   }
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
}
1048
1049
/**
 * If inside glBegin()/glEnd(), do nothing.  Otherwise, if
 * FLUSH_STORED_VERTICES bit in \p flags is set flushes any buffered
 * vertices, if FLUSH_UPDATE_CURRENT bit is set updates
 * __struct gl_contextRec::Current and gl_light_attrib::Material
 *
 * Note that the default T&L engine never clears the
 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
 *
 * \param flags  bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
 */
void
vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

#ifndef NDEBUG
   /* debug check: make sure we don't get called recursively */
   exec->flush_call_depth++;
   assert(exec->flush_call_depth == 1);
#endif

   if (_mesa_inside_begin_end(ctx)) {
      /* We've had glBegin but not glEnd! */
#ifndef NDEBUG
      exec->flush_call_depth--;
      assert(exec->flush_call_depth == 0);
#endif
      return;
   }

   /* Flush (draw). */
   vbo_exec_FlushVertices_internal(exec);

   /* Clear the dirty flush flags, because the flush is finished. */
   ctx->Driver.NeedFlush &= ~(FLUSH_UPDATE_CURRENT | flags);

#ifndef NDEBUG
   exec->flush_call_depth--;
   assert(exec->flush_call_depth == 0);
#endif
}
1092
1093
1094 /**
1095 * Reset the vertex attribute by setting its size to zero.
1096 */
1097 static void
1098 vbo_reset_attr(struct vbo_exec_context *exec, GLuint attr)
1099 {
1100 exec->vtx.attr[attr].size = 0;
1101 exec->vtx.attr[attr].type = GL_FLOAT;
1102 exec->vtx.attr[attr].active_size = 0;
1103 }
1104
1105
1106 static void
1107 vbo_reset_all_attr(struct vbo_exec_context *exec)
1108 {
1109 while (exec->vtx.enabled) {
1110 const int i = u_bit_scan64(&exec->vtx.enabled);
1111 vbo_reset_attr(exec, i);
1112 }
1113
1114 exec->vtx.vertex_size = 0;
1115 }
1116
1117
/** GL_ES entrypoint: forwards to the common immediate-mode glColor4f path. */
void GLAPIENTRY
_es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
{
   vbo_exec_Color4f(r, g, b, a);
}
1123
1124
/** GL_ES entrypoint: forwards to the common immediate-mode glNormal3f path. */
void GLAPIENTRY
_es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
{
   vbo_exec_Normal3f(x, y, z);
}
1130
1131
/** GL_ES entrypoint: forwards to the common glMultiTexCoord4f path. */
void GLAPIENTRY
_es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
{
   vbo_exec_MultiTexCoord4f(target, s, t, r, q);
}
1137
1138
/** GL_ES entrypoint: forwards to the common glMaterialfv path. */
void GLAPIENTRY
_es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   vbo_exec_Materialfv(face, pname, params);
}
1144
1145
1146 void GLAPIENTRY
1147 _es_Materialf(GLenum face, GLenum pname, GLfloat param)
1148 {
1149 GLfloat p[4];
1150 p[0] = param;
1151 p[1] = p[2] = p[3] = 0.0F;
1152 vbo_exec_Materialfv(face, pname, p);
1153 }
1154
1155
1156 /**
1157 * A special version of glVertexAttrib4f that does not treat index 0 as
1158 * VBO_ATTRIB_POS.
1159 */
1160 static void
1161 VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1162 {
1163 GET_CURRENT_CONTEXT(ctx);
1164 if (index < MAX_VERTEX_GENERIC_ATTRIBS)
1165 ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
1166 else
1167 ERROR(GL_INVALID_VALUE);
1168 }
1169
/** GL_ES entrypoint: set all four components of a generic attribute. */
void GLAPIENTRY
_es_VertexAttrib4f(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
{
   VertexAttrib4f_nopos(index, x, y, z, w);
}
1175
1176
/** GL_ES entrypoint: 1-component attribute; y and z default to 0, w to 1. */
void GLAPIENTRY
_es_VertexAttrib1f(GLuint indx, GLfloat x)
{
   VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
}
1182
1183
/** GL_ES entrypoint: vector form of _es_VertexAttrib1f (reads values[0]). */
void GLAPIENTRY
_es_VertexAttrib1fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
}
1189
1190
/** GL_ES entrypoint: 2-component attribute; z defaults to 0, w to 1. */
void GLAPIENTRY
_es_VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y)
{
   VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
}
1196
1197
/** GL_ES entrypoint: vector form of _es_VertexAttrib2f (reads values[0..1]). */
void GLAPIENTRY
_es_VertexAttrib2fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
}
1203
1204
/** GL_ES entrypoint: 3-component attribute; w defaults to 1. */
void GLAPIENTRY
_es_VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
{
   VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
}
1210
1211
/** GL_ES entrypoint: vector form of _es_VertexAttrib3f (reads values[0..2]). */
void GLAPIENTRY
_es_VertexAttrib3fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
}
1217
1218
/** GL_ES entrypoint: vector form of _es_VertexAttrib4f (reads values[0..3]). */
void GLAPIENTRY
_es_VertexAttrib4fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
}