vbo: remove dead code in vbo_can_merge_prims
[mesa.git] / src / mesa / vbo / vbo_exec_api.c
1 /**************************************************************************
2
3 Copyright 2002-2008 VMware, Inc.
4
5 All Rights Reserved.
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 on the rights to use, copy, modify, merge, publish, distribute, sub
11 license, and/or sell copies of the Software, and to permit persons to whom
12 the Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice (including the next
15 paragraph) shall be included in all copies or substantial portions of the
16 Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **************************************************************************/
27
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33 #include "main/glheader.h"
34 #include "main/bufferobj.h"
35 #include "main/context.h"
36 #include "main/macros.h"
37 #include "main/vtxfmt.h"
38 #include "main/dlist.h"
39 #include "main/eval.h"
40 #include "main/state.h"
41 #include "main/light.h"
42 #include "main/api_arrayelt.h"
43 #include "main/draw_validate.h"
44 #include "main/dispatch.h"
45 #include "util/bitscan.h"
46
47 #include "vbo_noop.h"
48 #include "vbo_private.h"
49
50
51 /** ID/name for immediate-mode VBO */
52 #define IMM_BUFFER_NAME 0xaabbccdd
53
54
55 static void
56 vbo_reset_all_attr(struct vbo_exec_context *exec);
57
58
59 /**
60 * Close off the last primitive, execute the buffer, restart the
61 * primitive. This is called when we fill a vertex buffer before
62 * hitting glEnd.
63 */
64 static void
65 vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
66 {
67 if (exec->vtx.prim_count == 0) {
68 exec->vtx.copied.nr = 0;
69 exec->vtx.vert_count = 0;
70 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
71 }
72 else {
73 struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
74 const GLuint last_begin = last_prim->begin;
75 GLuint last_count;
76
77 if (_mesa_inside_begin_end(exec->ctx)) {
78 last_prim->count = exec->vtx.vert_count - last_prim->start;
79 }
80
81 last_count = last_prim->count;
82
83 /* Special handling for wrapping GL_LINE_LOOP */
84 if (last_prim->mode == GL_LINE_LOOP &&
85 last_count > 0 &&
86 !last_prim->end) {
87 /* draw this section of the incomplete line loop as a line strip */
88 last_prim->mode = GL_LINE_STRIP;
89 if (!last_prim->begin) {
90 /* This is not the first section of the line loop, so don't
91 * draw the 0th vertex. We're saving it until we draw the
92 * very last section of the loop.
93 */
94 last_prim->start++;
95 last_prim->count--;
96 }
97 }
98
99 /* Execute the buffer and save copied vertices.
100 */
101 if (exec->vtx.vert_count)
102 vbo_exec_vtx_flush(exec);
103 else {
104 exec->vtx.prim_count = 0;
105 exec->vtx.copied.nr = 0;
106 }
107
108 /* Emit a glBegin to start the new list.
109 */
110 assert(exec->vtx.prim_count == 0);
111
112 if (_mesa_inside_begin_end(exec->ctx)) {
113 exec->vtx.prim[0].mode = exec->ctx->Driver.CurrentExecPrimitive;
114 exec->vtx.prim[0].begin = 0;
115 exec->vtx.prim[0].end = 0;
116 exec->vtx.prim[0].start = 0;
117 exec->vtx.prim[0].count = 0;
118 exec->vtx.prim_count++;
119
120 if (exec->vtx.copied.nr == last_count)
121 exec->vtx.prim[0].begin = last_begin;
122 }
123 }
124 }
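
/* Illustrative sketch (not driver code): how the GL_LINE_LOOP wrapping above
 * plays out for an application that overflows the vertex buffer mid-loop.
 *
 *    glBegin(GL_LINE_LOOP);
 *    for (i = 0; i < very_many; i++)   // more vertices than fit in one buffer
 *       glVertex2f(x[i], y[i]);
 *    glEnd();
 *
 * Each time the buffer fills, the section buffered so far is drawn as a
 * GL_LINE_STRIP; every section after the first drops vertex 0 (start++,
 * count--) so that the 0th vertex can be appended once, by vbo_exec_End(),
 * to close the loop.
 */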
125
126
127 /**
128  * Deal with buffer wrapping when it is provoked by the vertex buffer
129  * filling up, as opposed to by upgrade_vertex().
130 */
131 static void
132 vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
133 {
134 unsigned numComponents;
135
136 /* Run pipeline on current vertices, copy wrapped vertices
137 * to exec->vtx.copied.
138 */
139 vbo_exec_wrap_buffers(exec);
140
141 if (!exec->vtx.buffer_ptr) {
142 /* probably ran out of memory earlier when allocating the VBO */
143 return;
144 }
145
146    /* Copy stored vertices to start of new list.
147 */
148 assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);
149
150 numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
151 memcpy(exec->vtx.buffer_ptr,
152 exec->vtx.copied.buffer,
153 numComponents * sizeof(fi_type));
154 exec->vtx.buffer_ptr += numComponents;
155 exec->vtx.vert_count += exec->vtx.copied.nr;
156
157 exec->vtx.copied.nr = 0;
158 }
159
160
161 /**
162 * Copy the active vertex's values to the ctx->Current fields.
163 */
164 static void
165 vbo_exec_copy_to_current(struct vbo_exec_context *exec)
166 {
167 struct gl_context *ctx = exec->ctx;
168 struct vbo_context *vbo = vbo_context(ctx);
169 GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));
170
171 while (enabled) {
172 const int i = u_bit_scan64(&enabled);
173
174 /* Note: the exec->vtx.current[i] pointers point into the
175 * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
176 */
177 GLfloat *current = (GLfloat *)vbo->current[i].Ptr;
178 fi_type tmp[8]; /* space for doubles */
179 int dmul = 1;
180
181 if (exec->vtx.attr[i].type == GL_DOUBLE ||
182 exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB)
183 dmul = 2;
184
185 assert(exec->vtx.attr[i].size);
186
187 if (exec->vtx.attr[i].type == GL_DOUBLE ||
188 exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB) {
189 memset(tmp, 0, sizeof(tmp));
190 memcpy(tmp, exec->vtx.attrptr[i], exec->vtx.attr[i].size * sizeof(GLfloat));
191 } else {
192 COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
193 exec->vtx.attr[i].size,
194 exec->vtx.attrptr[i],
195 exec->vtx.attr[i].type);
196 }
197
198 if (exec->vtx.attr[i].type != vbo->current[i].Format.Type ||
199 memcmp(current, tmp, 4 * sizeof(GLfloat) * dmul) != 0) {
200 memcpy(current, tmp, 4 * sizeof(GLfloat) * dmul);
201
202 /* Given that we explicitly state size here, there is no need
203 * for the COPY_CLEAN above, could just copy 16 bytes and be
204 * done. The only problem is when Mesa accesses ctx->Current
205 * directly.
206 */
207 /* Size here is in components - not bytes */
208 vbo_set_vertex_format(&vbo->current[i].Format,
209 exec->vtx.attr[i].size / dmul,
210 exec->vtx.attr[i].type);
211
212 /* This triggers rather too much recalculation of Mesa state
213 * that doesn't get used (eg light positions).
214 */
215 if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT &&
216 i <= VBO_ATTRIB_MAT_BACK_INDEXES)
217 ctx->NewState |= _NEW_LIGHT;
218
219 ctx->NewState |= _NEW_CURRENT_ATTRIB;
220 }
221 }
222
223    /* Colormaterial -- this kind of sucks.
224 */
225 if (ctx->Light.ColorMaterialEnabled &&
226 exec->vtx.attr[VBO_ATTRIB_COLOR0].size) {
227 _mesa_update_color_material(ctx,
228 ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
229 }
230 }
231
232
233 /**
234 * Flush existing data, set new attrib size, replay copied vertices.
235 * This is called when we transition from a small vertex attribute size
236 * to a larger one. Ex: glTexCoord2f -> glTexCoord4f.
237 * We need to go back over the previous 2-component texcoords and insert
238 * zero and one values.
239 * \param attr VBO_ATTRIB_x vertex attribute value
240 */
241 static void
242 vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
243 GLuint attr, GLuint newSize)
244 {
245 struct gl_context *ctx = exec->ctx;
246 struct vbo_context *vbo = vbo_context(ctx);
247 const GLint lastcount = exec->vtx.vert_count;
248 fi_type *old_attrptr[VBO_ATTRIB_MAX];
249 const GLuint old_vtx_size_no_pos = exec->vtx.vertex_size_no_pos;
250 const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
251 const GLuint oldSize = exec->vtx.attr[attr].size;
252 GLuint i;
253
254 assert(attr < VBO_ATTRIB_MAX);
255
256 /* Run pipeline on current vertices, copy wrapped vertices
257 * to exec->vtx.copied.
258 */
259 vbo_exec_wrap_buffers(exec);
260
261 if (unlikely(exec->vtx.copied.nr)) {
262 /* We're in the middle of a primitive, keep the old vertex
263 * format around to be able to translate the copied vertices to
264 * the new format.
265 */
266 memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
267 }
268
269 /* Heuristic: Attempt to isolate attributes received outside
270 * begin/end so that they don't bloat the vertices.
271 */
272 if (!_mesa_inside_begin_end(ctx) &&
273 !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
274 vbo_exec_copy_to_current(exec);
275 vbo_reset_all_attr(exec);
276 }
277
278 /* Fix up sizes:
279 */
280 exec->vtx.attr[attr].size = newSize;
281 exec->vtx.vertex_size += newSize - oldSize;
282 exec->vtx.vertex_size_no_pos = exec->vtx.vertex_size - exec->vtx.attr[0].size;
283 exec->vtx.max_vert = vbo_compute_max_verts(exec);
284 exec->vtx.vert_count = 0;
285 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
286 exec->vtx.enabled |= BITFIELD64_BIT(attr);
287
288 if (attr != 0) {
289 if (unlikely(oldSize)) {
290 unsigned offset = exec->vtx.attrptr[attr] - exec->vtx.vertex;
291
292 /* If there are attribs after the resized attrib... */
293 if (offset + oldSize < old_vtx_size_no_pos) {
294 int size_diff = newSize - oldSize;
295 fi_type *old_first = exec->vtx.attrptr[attr] + oldSize;
296 fi_type *new_first = exec->vtx.attrptr[attr] + newSize;
297 fi_type *old_last = exec->vtx.vertex + old_vtx_size_no_pos - 1;
298 fi_type *new_last = exec->vtx.vertex + exec->vtx.vertex_size_no_pos - 1;
299
300 if (size_diff < 0) {
301 /* Decreasing the size: Copy from first to last to move
302 * elements to the left.
303 */
304 fi_type *old_end = old_last + 1;
305 fi_type *old = old_first;
306 fi_type *new = new_first;
307
308 do {
309 *new++ = *old++;
310 } while (old != old_end);
311 } else {
312 /* Increasing the size: Copy from last to first to move
313 * elements to the right.
314 */
315 fi_type *old_end = old_first - 1;
316 fi_type *old = old_last;
317 fi_type *new = new_last;
318
319 do {
320 *new-- = *old--;
321 } while (old != old_end);
322 }
323
324 /* Update pointers to attribs, because we moved them. */
325 GLbitfield64 enabled = exec->vtx.enabled &
326 ~BITFIELD64_BIT(VBO_ATTRIB_POS) &
327 ~BITFIELD64_BIT(attr);
328 while (enabled) {
329 unsigned i = u_bit_scan64(&enabled);
330
331 if (exec->vtx.attrptr[i] > exec->vtx.attrptr[attr])
332 exec->vtx.attrptr[i] += size_diff;
333 }
334 }
335 } else {
336 /* Just have to append the new attribute at the end */
337 exec->vtx.attrptr[attr] = exec->vtx.vertex +
338 exec->vtx.vertex_size_no_pos - newSize;
339 }
340 }
341
342 /* The position is always last. */
343 exec->vtx.attrptr[0] = exec->vtx.vertex + exec->vtx.vertex_size_no_pos;
344
345 /* Replay stored vertices to translate them
346 * to new format here.
347 *
348 * -- No need to replay - just copy piecewise
349 */
350 if (unlikely(exec->vtx.copied.nr)) {
351 fi_type *data = exec->vtx.copied.buffer;
352 fi_type *dest = exec->vtx.buffer_ptr;
353
354 assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);
355
356 for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
357 GLbitfield64 enabled = exec->vtx.enabled;
358 while (enabled) {
359 const int j = u_bit_scan64(&enabled);
360 GLuint sz = exec->vtx.attr[j].size;
361 GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
362 GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;
363
364 assert(sz);
365
366 if (j == attr) {
367 if (oldSize) {
368 fi_type tmp[4];
369 COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
370 data + old_offset,
371 exec->vtx.attr[j].type);
372 COPY_SZ_4V(dest + new_offset, newSize, tmp);
373 } else {
374 fi_type *current = (fi_type *)vbo->current[j].Ptr;
375 COPY_SZ_4V(dest + new_offset, sz, current);
376 }
377 }
378 else {
379 COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
380 }
381 }
382
383 data += old_vtx_size;
384 dest += exec->vtx.vertex_size;
385 }
386
387 exec->vtx.buffer_ptr = dest;
388 exec->vtx.vert_count += exec->vtx.copied.nr;
389 exec->vtx.copied.nr = 0;
390 }
391 }
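
/* Illustrative layout sketch (assumed example, not from the source): the
 * glTexCoord2f -> glTexCoord4f upgrade handled above, for a vertex holding
 * COLOR0 (3 floats), TEX0 and the position (which is always stored last):
 *
 *    before:  [ C0 C1 C2 | T0 T1       | X Y Z ]   vertex_size = 8
 *    after:   [ C0 C1 C2 | T0 T1 T2 T3 | X Y Z ]   vertex_size = 10
 *
 * Attributes stored after the resized one are shifted by size_diff (the
 * last-to-first copy loop), attrptr[] entries past it are adjusted, and any
 * vertices copied from a wrapped primitive are re-emitted with the missing
 * components taken from the type's defaults (0, 0, 0, 1).
 */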
392
393
394 /**
395 * This is when a vertex attribute transitions to a different size.
396 * For example, we saw a bunch of glTexCoord2f() calls and now we got a
397 * glTexCoord4f() call. We promote the array from size=2 to size=4.
398  * \param newSize new size of the attribute (number of 32-bit words).
399 * \param attr VBO_ATTRIB_x vertex attribute value
400 */
401 static void
402 vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
403 GLuint newSize, GLenum newType)
404 {
405 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
406
407 assert(attr < VBO_ATTRIB_MAX);
408
409 if (newSize > exec->vtx.attr[attr].size ||
410 newType != exec->vtx.attr[attr].type) {
411       /* New size is larger or the type changed.  Need to flush existing
412        * vertices and get an enlarged vertex format.
413 */
414 vbo_exec_wrap_upgrade_vertex(exec, attr, newSize);
415 }
416 else if (newSize < exec->vtx.attr[attr].active_size) {
417 GLuint i;
418 const fi_type *id =
419 vbo_get_default_vals_as_union(exec->vtx.attr[attr].type);
420
421       /* New size is smaller - just need to fill in the default
422        * values (zeros, or one for w).  Don't need to flush or wrap.
423 */
424 for (i = newSize; i <= exec->vtx.attr[attr].size; i++)
425 exec->vtx.attrptr[attr][i-1] = id[i-1];
426 }
427
428 exec->vtx.attr[attr].active_size = newSize;
429 exec->vtx.attr[attr].type = newType;
430
431 /* Does setting NeedFlush belong here? Necessitates resetting
432 * vtxfmt on each flush (otherwise flags won't get reset
433 * afterwards).
434 */
435 if (attr == 0)
436 ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
437 }
438
439
440 /**
441 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
442 * It depends on a few things, including whether we're inside or outside
443 * of glBegin/glEnd.
444 */
445 static inline bool
446 is_vertex_position(const struct gl_context *ctx, GLuint index)
447 {
448 return (index == 0 &&
449 _mesa_attr_zero_aliases_vertex(ctx) &&
450 _mesa_inside_begin_end(ctx));
451 }
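
/* Usage sketch (assumes a compatibility profile where generic attribute 0
 * aliases glVertex): inside a begin/end pair, writing attribute 0 emits a
 * vertex just like glVertex*() would, while other indices only latch values.
 *
 *    glBegin(GL_TRIANGLES);
 *    glVertexAttrib4f(1, r, g, b, 1.0f);   // latches generic attribute 1
 *    glVertexAttrib4f(0, x, y, z, 1.0f);   // aliases glVertex4f: emits a vertex
 *    glEnd();
 *
 * When is_vertex_position() returns false, index 0 is treated as an ordinary
 * generic attribute and no vertex is emitted.
 */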
452
453 /* Write a 64-bit value into a 32-bit pointer by preserving endianness. */
454 #if UTIL_ARCH_LITTLE_ENDIAN
455 #define SET_64BIT(dst32, u64) do { \
456 *(dst32)++ = (u64); \
457 *(dst32)++ = (uint64_t)(u64) >> 32; \
458 } while (0)
459 #else
460 #define SET_64BIT(dst32, u64) do { \
461 *(dst32)++ = (uint64_t)(u64) >> 32; \
462 *(dst32)++ = (u64); \
463 } while (0)
464 #endif
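
/* Example (sketch): storing one double-precision component through SET_64BIT.
 * The 64-bit pattern is split into two 32-bit words such that reading the two
 * words back in memory order reproduces the original value on either
 * endianness.
 *
 *    uint32_t buf[2], *dst = buf;
 *    union { double d; uint64_t u64; } v;
 *    v.d = 1.5;
 *    SET_64BIT(dst, v.u64);   // writes two words, advances dst by 2
 */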
465
466
467 /**
468 * This macro is used to implement all the glVertex, glColor, glTexCoord,
469 * glVertexAttrib, etc functions.
470 * \param A VBO_ATTRIB_x attribute index
471 * \param N attribute size (1..4)
472 * \param T type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
473 * \param C cast type (uint32_t or uint64_t)
474  * \param V0, V1, V2, V3 attribute value
475 */
476 #define ATTR_UNION(A, N, T, C, V0, V1, V2, V3) \
477 do { \
478 struct vbo_exec_context *exec = &vbo_context(ctx)->exec; \
479 int sz = (sizeof(C) / sizeof(GLfloat)); \
480 \
481 assert(sz == 1 || sz == 2); \
482 \
483 /* check if attribute size or type is changing */ \
484 if (unlikely(exec->vtx.attr[A].active_size != N * sz || \
485 exec->vtx.attr[A].type != T)) { \
486 vbo_exec_fixup_vertex(ctx, A, N * sz, T); \
487 } \
488 \
489 /* store a copy of the attribute in exec except for glVertex */ \
490 if ((A) != 0) { \
491 C *dest = (C *)exec->vtx.attrptr[A]; \
492 if (N>0) dest[0] = V0; \
493 if (N>1) dest[1] = V1; \
494 if (N>2) dest[2] = V2; \
495 if (N>3) dest[3] = V3; \
496 assert(exec->vtx.attr[A].type == T); \
497 } \
498 \
499 if ((A) == 0) { \
500 /* This is a glVertex call */ \
501 uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr; \
502 uint32_t *src = (uint32_t *)exec->vtx.vertex; \
503 unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos; \
504 \
505 /* Copy over attributes from exec. */ \
506 for (unsigned i = 0; i < vertex_size_no_pos; i++) \
507 *dst++ = *src++; \
508 \
509 /* Store the position, which is always last and can have 32 or */ \
510 /* 64 bits per channel. */ \
511 if (sizeof(C) == 4) { \
512 if (N > 0) *dst++ = V0; \
513 if (N > 1) *dst++ = V1; \
514 if (N > 2) *dst++ = V2; \
515 if (N > 3) *dst++ = V3; \
516 } else { \
517 /* 64 bits: dst can be unaligned, so copy each 4-byte word */ \
518 /* separately */ \
519 if (N > 0) SET_64BIT(dst, V0); \
520 if (N > 1) SET_64BIT(dst, V1); \
521 if (N > 2) SET_64BIT(dst, V2); \
522 if (N > 3) SET_64BIT(dst, V3); \
523 } \
524 \
525 /* dst now points at the beginning of the next vertex */ \
526 exec->vtx.buffer_ptr = (fi_type*)dst; \
527 \
528 /* Don't set FLUSH_UPDATE_CURRENT because */ \
529 /* Current.Attrib[VBO_ATTRIB_POS] is never used. */ \
530 \
531 if (unlikely(++exec->vtx.vert_count >= exec->vtx.max_vert)) \
532 vbo_exec_vtx_wrap(exec); \
533 } else { \
534 /* we now have accumulated per-vertex attributes */ \
535 ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT; \
536 } \
537 } while (0)
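
/* Illustrative expansion (a sketch; the real per-entrypoint wrappers are
 * generated from vbo_attrib_tmp.h and pass bit-cast words, not raw floats):
 *
 *    glColor3f(r, g, b)   ~>  ATTR_UNION(VBO_ATTRIB_COLOR0, 3, GL_FLOAT,
 *                                        uint32_t, r, g, b, 1)
 *       latches the color into exec->vtx.attrptr[VBO_ATTRIB_COLOR0]
 *
 *    glVertex3f(x, y, z)  ~>  ATTR_UNION(VBO_ATTRIB_POS, 3, GL_FLOAT,
 *                                        uint32_t, x, y, z, 1)
 *       copies all latched attributes plus the position into buffer_ptr,
 *       counts a vertex and wraps the buffer if it is full
 */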
538
539
540 #undef ERROR
541 #define ERROR(err) _mesa_error(ctx, err, __func__)
542 #define TAG(x) vbo_exec_##x
543
544 #include "vbo_attrib_tmp.h"
545
546
547
548 /**
549 * Execute a glMaterial call. Note that if GL_COLOR_MATERIAL is enabled,
550 * this may be a (partial) no-op.
551 */
552 static void GLAPIENTRY
553 vbo_exec_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
554 {
555 GLbitfield updateMats;
556 GET_CURRENT_CONTEXT(ctx);
557
558 /* This function should be a no-op when it tries to update material
559 * attributes which are currently tracking glColor via glColorMaterial.
560 * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
561 * indicating which material attributes can actually be updated below.
562 */
563 if (ctx->Light.ColorMaterialEnabled) {
564 updateMats = ~ctx->Light._ColorMaterialBitmask;
565 }
566 else {
567 /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
568 updateMats = ALL_MATERIAL_BITS;
569 }
570
571 if (ctx->API == API_OPENGL_COMPAT && face == GL_FRONT) {
572 updateMats &= FRONT_MATERIAL_BITS;
573 }
574 else if (ctx->API == API_OPENGL_COMPAT && face == GL_BACK) {
575 updateMats &= BACK_MATERIAL_BITS;
576 }
577 else if (face != GL_FRONT_AND_BACK) {
578 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
579 return;
580 }
581
582 switch (pname) {
583 case GL_EMISSION:
584 if (updateMats & MAT_BIT_FRONT_EMISSION)
585 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
586 if (updateMats & MAT_BIT_BACK_EMISSION)
587 MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
588 break;
589 case GL_AMBIENT:
590 if (updateMats & MAT_BIT_FRONT_AMBIENT)
591 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
592 if (updateMats & MAT_BIT_BACK_AMBIENT)
593 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
594 break;
595 case GL_DIFFUSE:
596 if (updateMats & MAT_BIT_FRONT_DIFFUSE)
597 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
598 if (updateMats & MAT_BIT_BACK_DIFFUSE)
599 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
600 break;
601 case GL_SPECULAR:
602 if (updateMats & MAT_BIT_FRONT_SPECULAR)
603 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
604 if (updateMats & MAT_BIT_BACK_SPECULAR)
605 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
606 break;
607 case GL_SHININESS:
608 if (*params < 0 || *params > ctx->Const.MaxShininess) {
609 _mesa_error(ctx, GL_INVALID_VALUE,
610                      "glMaterial(invalid shininess: %f out of range [0, %f])",
611 *params, ctx->Const.MaxShininess);
612 return;
613 }
614 if (updateMats & MAT_BIT_FRONT_SHININESS)
615 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
616 if (updateMats & MAT_BIT_BACK_SHININESS)
617 MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
618 break;
619 case GL_COLOR_INDEXES:
620 if (ctx->API != API_OPENGL_COMPAT) {
621 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
622 return;
623 }
624 if (updateMats & MAT_BIT_FRONT_INDEXES)
625 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
626 if (updateMats & MAT_BIT_BACK_INDEXES)
627 MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
628 break;
629 case GL_AMBIENT_AND_DIFFUSE:
630 if (updateMats & MAT_BIT_FRONT_AMBIENT)
631 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
632 if (updateMats & MAT_BIT_FRONT_DIFFUSE)
633 MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
634 if (updateMats & MAT_BIT_BACK_AMBIENT)
635 MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
636 if (updateMats & MAT_BIT_BACK_DIFFUSE)
637 MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
638 break;
639 default:
640 _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
641 return;
642 }
643 }
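
/* Usage sketch (application side): with GL_COLOR_MATERIAL enabled and
 * tracking GL_AMBIENT_AND_DIFFUSE (the default), the diffuse update below is
 * masked out by updateMats and only the specular update takes effect:
 *
 *    glEnable(GL_COLOR_MATERIAL);
 *    glMaterialfv(GL_FRONT, GL_DIFFUSE,  diffuse);   // no-op while tracked
 *    glMaterialfv(GL_FRONT, GL_SPECULAR, specular);  // applied normally
 */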
644
645
646 /**
647 * Flush (draw) vertices.
648 *
649 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
650 */
651 static void
652 vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, unsigned flags)
653 {
654 struct gl_context *ctx = exec->ctx;
655
656 if (flags & FLUSH_STORED_VERTICES) {
657 if (exec->vtx.vert_count) {
658 vbo_exec_vtx_flush(exec);
659 }
660
661 if (exec->vtx.vertex_size) {
662 vbo_exec_copy_to_current(exec);
663 vbo_reset_all_attr(exec);
664 }
665
666 /* All done. */
667 ctx->Driver.NeedFlush = 0;
668 } else {
669 assert(flags == FLUSH_UPDATE_CURRENT);
670
671 /* Note that the vertex size is unchanged.
672 * (vbo_reset_all_attr isn't called)
673 */
674 vbo_exec_copy_to_current(exec);
675
676 /* Only FLUSH_UPDATE_CURRENT is done. */
677       ctx->Driver.NeedFlush &= ~FLUSH_UPDATE_CURRENT;
678 }
679 }
680
681
682 static void GLAPIENTRY
683 vbo_exec_EvalCoord1f(GLfloat u)
684 {
685 GET_CURRENT_CONTEXT(ctx);
686 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
687
688 {
689 GLint i;
690 if (exec->eval.recalculate_maps)
691 vbo_exec_eval_update(exec);
692
693 for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
694 if (exec->eval.map1[i].map)
695 if (exec->vtx.attr[i].active_size != exec->eval.map1[i].sz)
696 vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
697 }
698 }
699
700 memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
701 exec->vtx.vertex_size * sizeof(GLfloat));
702
703 vbo_exec_do_EvalCoord1f(exec, u);
704
705 memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
706 exec->vtx.vertex_size * sizeof(GLfloat));
707 }
708
709
710 static void GLAPIENTRY
711 vbo_exec_EvalCoord2f(GLfloat u, GLfloat v)
712 {
713 GET_CURRENT_CONTEXT(ctx);
714 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
715
716 {
717 GLint i;
718 if (exec->eval.recalculate_maps)
719 vbo_exec_eval_update(exec);
720
721 for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
722 if (exec->eval.map2[i].map)
723 if (exec->vtx.attr[i].active_size != exec->eval.map2[i].sz)
724 vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
725 }
726
727 if (ctx->Eval.AutoNormal)
728 if (exec->vtx.attr[VBO_ATTRIB_NORMAL].active_size != 3)
729 vbo_exec_fixup_vertex(ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT);
730 }
731
732 memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
733 exec->vtx.vertex_size * sizeof(GLfloat));
734
735 vbo_exec_do_EvalCoord2f(exec, u, v);
736
737 memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
738 exec->vtx.vertex_size * sizeof(GLfloat));
739 }
740
741
742 static void GLAPIENTRY
743 vbo_exec_EvalCoord1fv(const GLfloat *u)
744 {
745 vbo_exec_EvalCoord1f(u[0]);
746 }
747
748
749 static void GLAPIENTRY
750 vbo_exec_EvalCoord2fv(const GLfloat *u)
751 {
752 vbo_exec_EvalCoord2f(u[0], u[1]);
753 }
754
755
756 static void GLAPIENTRY
757 vbo_exec_EvalPoint1(GLint i)
758 {
759 GET_CURRENT_CONTEXT(ctx);
760 GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
761 (GLfloat) ctx->Eval.MapGrid1un);
762 GLfloat u = i * du + ctx->Eval.MapGrid1u1;
763
764 vbo_exec_EvalCoord1f(u);
765 }
766
767
768 static void GLAPIENTRY
769 vbo_exec_EvalPoint2(GLint i, GLint j)
770 {
771 GET_CURRENT_CONTEXT(ctx);
772 GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
773 (GLfloat) ctx->Eval.MapGrid2un);
774 GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
775 (GLfloat) ctx->Eval.MapGrid2vn);
776 GLfloat u = i * du + ctx->Eval.MapGrid2u1;
777 GLfloat v = j * dv + ctx->Eval.MapGrid2v1;
778
779 vbo_exec_EvalCoord2f(u, v);
780 }
781
782
783 /**
784 * Called via glBegin.
785 */
786 static void GLAPIENTRY
787 vbo_exec_Begin(GLenum mode)
788 {
789 GET_CURRENT_CONTEXT(ctx);
790 struct vbo_context *vbo = vbo_context(ctx);
791 struct vbo_exec_context *exec = &vbo->exec;
792 int i;
793
794 if (_mesa_inside_begin_end(ctx)) {
795 _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
796 return;
797 }
798
799 if (!_mesa_valid_prim_mode(ctx, mode, "glBegin")) {
800 return;
801 }
802
803 if (!_mesa_valid_to_render(ctx, "glBegin")) {
804 return;
805 }
806
807 /* Heuristic: attempt to isolate attributes occurring outside
808 * begin/end pairs.
809 *
810 * Use FLUSH_STORED_VERTICES, because it updates current attribs and
811 * sets vertex_size to 0. (FLUSH_UPDATE_CURRENT doesn't change vertex_size)
812 */
813 if (exec->vtx.vertex_size && !exec->vtx.attr[VBO_ATTRIB_POS].size)
814 vbo_exec_FlushVertices_internal(exec, FLUSH_STORED_VERTICES);
815
816 i = exec->vtx.prim_count++;
817 exec->vtx.prim[i].mode = mode;
818 exec->vtx.prim[i].begin = 1;
819 exec->vtx.prim[i].end = 0;
820 exec->vtx.prim[i].indexed = 0;
821 exec->vtx.prim[i].start = exec->vtx.vert_count;
822 exec->vtx.prim[i].count = 0;
823 exec->vtx.prim[i].num_instances = 1;
824 exec->vtx.prim[i].base_instance = 0;
825
826 ctx->Driver.CurrentExecPrimitive = mode;
827
828 ctx->Exec = ctx->BeginEnd;
829
830 /* We may have been called from a display list, in which case we should
831 * leave dlist.c's dispatch table in place.
832 */
833 if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
834 ctx->CurrentServerDispatch = ctx->Exec;
835 } else if (ctx->CurrentClientDispatch == ctx->OutsideBeginEnd) {
836 ctx->CurrentClientDispatch = ctx->Exec;
837 _glapi_set_dispatch(ctx->CurrentClientDispatch);
838 } else {
839 assert(ctx->CurrentClientDispatch == ctx->Save);
840 }
841 }
842
843
844 /**
845 * Try to merge / concatenate the two most recent VBO primitives.
846 */
847 static void
848 try_vbo_merge(struct vbo_exec_context *exec)
849 {
850 struct _mesa_prim *cur = &exec->vtx.prim[exec->vtx.prim_count - 1];
851
852 assert(exec->vtx.prim_count >= 1);
853
854 vbo_try_prim_conversion(cur);
855
856 if (exec->vtx.prim_count >= 2) {
857 struct _mesa_prim *prev = &exec->vtx.prim[exec->vtx.prim_count - 2];
858 assert(prev == cur - 1);
859
860 if (vbo_can_merge_prims(prev, cur)) {
861 assert(cur->begin);
862 assert(cur->end);
863 assert(prev->begin);
864 assert(prev->end);
865 vbo_merge_prims(prev, cur);
866 exec->vtx.prim_count--; /* drop the last primitive */
867 }
868 }
869 }
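
/* Illustrative sketch: the kind of call sequence this merge collapses.  Two
 * consecutive begin/end pairs of the same (mergeable) mode,
 *
 *    glBegin(GL_TRIANGLES); ... 3 vertices ... glEnd();
 *    glBegin(GL_TRIANGLES); ... 3 vertices ... glEnd();
 *
 * end up as adjacent entries in exec->vtx.prim[]; if vbo_can_merge_prims()
 * accepts them, vbo_merge_prims() folds the second into the first and
 * prim_count is decremented, so a single 6-vertex draw reaches the driver.
 */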
870
871
872 /**
873 * Called via glEnd.
874 */
875 static void GLAPIENTRY
876 vbo_exec_End(void)
877 {
878 GET_CURRENT_CONTEXT(ctx);
879 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
880
881 if (!_mesa_inside_begin_end(ctx)) {
882 _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
883 return;
884 }
885
886 ctx->Exec = ctx->OutsideBeginEnd;
887
888 if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
889 ctx->CurrentServerDispatch = ctx->Exec;
890 } else if (ctx->CurrentClientDispatch == ctx->BeginEnd) {
891 ctx->CurrentClientDispatch = ctx->Exec;
892 _glapi_set_dispatch(ctx->CurrentClientDispatch);
893 }
894
895 if (exec->vtx.prim_count > 0) {
896 /* close off current primitive */
897 struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
898 unsigned count = exec->vtx.vert_count - last_prim->start;
899
900 last_prim->end = 1;
901 last_prim->count = count;
902
903 if (count)
904 ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
905
906 /* Special handling for GL_LINE_LOOP */
907 if (last_prim->mode == GL_LINE_LOOP && last_prim->begin == 0) {
908 /* We're finishing drawing a line loop. Append 0th vertex onto
909 * end of vertex buffer so we can draw it as a line strip.
910 */
911 const fi_type *src = exec->vtx.buffer_map +
912 last_prim->start * exec->vtx.vertex_size;
913 fi_type *dst = exec->vtx.buffer_map +
914 exec->vtx.vert_count * exec->vtx.vertex_size;
915
916 /* copy 0th vertex to end of buffer */
917 memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));
918
919 last_prim->start++; /* skip vertex0 */
920 /* note that last_prim->count stays unchanged */
921 last_prim->mode = GL_LINE_STRIP;
922
923 /* Increment the vertex count so the next primitive doesn't
924 * overwrite the last vertex which we just added.
925 */
926 exec->vtx.vert_count++;
927 exec->vtx.buffer_ptr += exec->vtx.vertex_size;
928 }
929
930 try_vbo_merge(exec);
931 }
932
933 ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;
934
935 if (exec->vtx.prim_count == VBO_MAX_PRIM)
936 vbo_exec_vtx_flush(exec);
937
938 if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
939 _mesa_flush(ctx);
940 }
941 }
942
943
944 /**
945 * Called via glPrimitiveRestartNV()
946 */
947 static void GLAPIENTRY
948 vbo_exec_PrimitiveRestartNV(void)
949 {
950 GLenum curPrim;
951 GET_CURRENT_CONTEXT(ctx);
952
953 curPrim = ctx->Driver.CurrentExecPrimitive;
954
955 if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
956 _mesa_error(ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV");
957 }
958 else {
959 vbo_exec_End();
960 vbo_exec_Begin(curPrim);
961 }
962 }
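
/* Usage sketch: inside a begin/end pair, glPrimitiveRestartNV() behaves like
 * an implicit glEnd()/glBegin() with the same primitive mode:
 *
 *    glBegin(GL_TRIANGLE_STRIP);
 *    ...                          // first strip
 *    glPrimitiveRestartNV();      // same as glEnd(); glBegin(GL_TRIANGLE_STRIP);
 *    ...                          // second strip
 *    glEnd();
 */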
963
964
965 static void
966 vbo_exec_vtxfmt_init(struct vbo_exec_context *exec)
967 {
968 struct gl_context *ctx = exec->ctx;
969 GLvertexformat *vfmt = &exec->vtxfmt;
970
971 #define NAME_AE(x) _ae_##x
972 #define NAME_CALLLIST(x) _mesa_##x
973 #define NAME(x) vbo_exec_##x
974 #define NAME_ES(x) _es_##x
975
976 #include "vbo_init_tmp.h"
977 }
978
979
980 static void
981 vbo_reset_all_attr(struct vbo_exec_context *exec)
982 {
983 while (exec->vtx.enabled) {
984 const int i = u_bit_scan64(&exec->vtx.enabled);
985
986 /* Reset the vertex attribute by setting its size to zero. */
987 exec->vtx.attr[i].size = 0;
988 exec->vtx.attr[i].type = GL_FLOAT;
989 exec->vtx.attr[i].active_size = 0;
990 exec->vtx.attrptr[i] = NULL;
991 }
992
993 exec->vtx.vertex_size = 0;
994 }
995
996
997 void
998 vbo_exec_vtx_init(struct vbo_exec_context *exec, bool use_buffer_objects)
999 {
1000 struct gl_context *ctx = exec->ctx;
1001
1002 if (use_buffer_objects) {
1003 /* Use buffer objects for immediate mode. */
1004 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
1005
1006 exec->vtx.bufferobj = ctx->Driver.NewBufferObject(ctx, IMM_BUFFER_NAME);
1007
1008 /* Map the buffer. */
1009 vbo_exec_vtx_map(exec);
1010 assert(exec->vtx.buffer_ptr);
1011 } else {
1012 /* Use allocated memory for immediate mode. */
1013 _mesa_reference_buffer_object(ctx,
1014 &exec->vtx.bufferobj,
1015 ctx->Shared->NullBufferObj);
1016
1017 exec->vtx.buffer_map = _mesa_align_malloc(VBO_VERT_BUFFER_SIZE, 64);
1018 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
1019 }
1020
1021 vbo_exec_vtxfmt_init(exec);
1022 _mesa_noop_vtxfmt_init(ctx, &exec->vtxfmt_noop);
1023
1024 exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
1025 vbo_reset_all_attr(exec);
1026 }
1027
1028
1029 void
1030 vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
1031 {
1032 /* using a real VBO for vertex data */
1033 struct gl_context *ctx = exec->ctx;
1034
1035 /* True VBOs should already be unmapped
1036 */
1037 if (exec->vtx.buffer_map) {
1038 assert(exec->vtx.bufferobj->Name == 0 ||
1039 exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
1040 if (exec->vtx.bufferobj->Name == 0) {
1041 _mesa_align_free(exec->vtx.buffer_map);
1042 exec->vtx.buffer_map = NULL;
1043 exec->vtx.buffer_ptr = NULL;
1044 }
1045 }
1046
1047 /* Free the vertex buffer. Unmap first if needed.
1048 */
1049 if (_mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
1050 ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
1051 }
1052 _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
1053 }
1054
1055
1056 /**
1057  * If called inside glBegin()/glEnd(), this is a no-op.  Otherwise, if the
1058  * FLUSH_STORED_VERTICES bit in \p flags is set, flush any buffered
1059  * vertices; if the FLUSH_UPDATE_CURRENT bit is set, update
1060  * __struct gl_contextRec::Current and gl_light_attrib::Material.
1061 *
1062 * Note that the default T&L engine never clears the
1063 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
1064 *
1065 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
1066 */
1067 void
1068 vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags)
1069 {
1070 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
1071
1072 #ifndef NDEBUG
1073 /* debug check: make sure we don't get called recursively */
1074 exec->flush_call_depth++;
1075 assert(exec->flush_call_depth == 1);
1076 #endif
1077
1078 if (_mesa_inside_begin_end(ctx)) {
1079 /* We've had glBegin but not glEnd! */
1080 #ifndef NDEBUG
1081 exec->flush_call_depth--;
1082 assert(exec->flush_call_depth == 0);
1083 #endif
1084 return;
1085 }
1086
1087 /* Flush (draw). */
1088 vbo_exec_FlushVertices_internal(exec, flags);
1089
1090 #ifndef NDEBUG
1091 exec->flush_call_depth--;
1092 assert(exec->flush_call_depth == 0);
1093 #endif
1094 }
1095
1096
1097 void GLAPIENTRY
1098 _es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
1099 {
1100 vbo_exec_Color4f(r, g, b, a);
1101 }
1102
1103
1104 void GLAPIENTRY
1105 _es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
1106 {
1107 vbo_exec_Normal3f(x, y, z);
1108 }
1109
1110
1111 void GLAPIENTRY
1112 _es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
1113 {
1114 vbo_exec_MultiTexCoord4f(target, s, t, r, q);
1115 }
1116
1117
1118 void GLAPIENTRY
1119 _es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
1120 {
1121 vbo_exec_Materialfv(face, pname, params);
1122 }
1123
1124
1125 void GLAPIENTRY
1126 _es_Materialf(GLenum face, GLenum pname, GLfloat param)
1127 {
1128 GLfloat p[4];
1129 p[0] = param;
1130 p[1] = p[2] = p[3] = 0.0F;
1131 vbo_exec_Materialfv(face, pname, p);
1132 }
1133
1134
1135 /**
1136 * A special version of glVertexAttrib4f that does not treat index 0 as
1137 * VBO_ATTRIB_POS.
1138 */
1139 static void
1140 VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1141 {
1142 GET_CURRENT_CONTEXT(ctx);
1143 if (index < MAX_VERTEX_GENERIC_ATTRIBS)
1144 ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
1145 else
1146 ERROR(GL_INVALID_VALUE);
1147 }
1148
1149 void GLAPIENTRY
1150 _es_VertexAttrib4f(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1151 {
1152 VertexAttrib4f_nopos(index, x, y, z, w);
1153 }
1154
1155
1156 void GLAPIENTRY
1157 _es_VertexAttrib1f(GLuint indx, GLfloat x)
1158 {
1159 VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
1160 }
1161
1162
1163 void GLAPIENTRY
1164 _es_VertexAttrib1fv(GLuint indx, const GLfloat* values)
1165 {
1166 VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
1167 }
1168
1169
1170 void GLAPIENTRY
1171 _es_VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y)
1172 {
1173 VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
1174 }
1175
1176
1177 void GLAPIENTRY
1178 _es_VertexAttrib2fv(GLuint indx, const GLfloat* values)
1179 {
1180 VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
1181 }
1182
1183
1184 void GLAPIENTRY
1185 _es_VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
1186 {
1187 VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
1188 }
1189
1190
1191 void GLAPIENTRY
1192 _es_VertexAttrib3fv(GLuint indx, const GLfloat* values)
1193 {
1194 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
1195 }
1196
1197
1198 void GLAPIENTRY
1199 _es_VertexAttrib4fv(GLuint indx, const GLfloat* values)
1200 {
1201 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
1202 }