f106fc2d970fa093c50792fb8a5c3f7ec2aacb45
[mesa.git] / src / mesa / vbo / vbo_exec_api.c
1 /**************************************************************************
2
3 Copyright 2002-2008 VMware, Inc.
4
5 All Rights Reserved.
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 on the rights to use, copy, modify, merge, publish, distribute, sub
11 license, and/or sell copies of the Software, and to permit persons to whom
12 the Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice (including the next
15 paragraph) shall be included in all copies or substantial portions of the
16 Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **************************************************************************/
27
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33 #include "main/glheader.h"
34 #include "main/bufferobj.h"
35 #include "main/context.h"
36 #include "main/macros.h"
37 #include "main/vtxfmt.h"
38 #include "main/dlist.h"
39 #include "main/eval.h"
40 #include "main/state.h"
41 #include "main/light.h"
42 #include "main/api_arrayelt.h"
43 #include "main/draw_validate.h"
44 #include "main/dispatch.h"
45 #include "util/bitscan.h"
46
47 #include "vbo_noop.h"
48 #include "vbo_private.h"
49
50
51 /** ID/name for immediate-mode VBO */
52 #define IMM_BUFFER_NAME 0xaabbccdd
53
54
55 static void
56 vbo_reset_all_attr(struct vbo_exec_context *exec);
57
58
59 /**
60 * Close off the last primitive, execute the buffer, restart the
61 * primitive. This is called when we fill a vertex buffer before
62 * hitting glEnd.
63 */
static void
vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
{
   if (exec->vtx.prim_count == 0) {
      /* Nothing queued: just rewind the write cursor to the start. */
      exec->vtx.copied.nr = 0;
      exec->vtx.vert_count = 0;
      exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   }
   else {
      struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
      const GLuint last_begin = last_prim->begin;
      GLuint last_count;

      /* If we're inside glBegin/glEnd, the last primitive is still open;
       * close its count off at the current vertex.
       */
      if (_mesa_inside_begin_end(exec->ctx)) {
         last_prim->count = exec->vtx.vert_count - last_prim->start;
      }

      last_count = last_prim->count;

      /* Special handling for wrapping GL_LINE_LOOP */
      if (last_prim->mode == GL_LINE_LOOP &&
          last_count > 0 &&
          !last_prim->end) {
         /* draw this section of the incomplete line loop as a line strip */
         last_prim->mode = GL_LINE_STRIP;
         if (!last_prim->begin) {
            /* This is not the first section of the line loop, so don't
             * draw the 0th vertex.  We're saving it until we draw the
             * very last section of the loop.
             */
            last_prim->start++;
            last_prim->count--;
         }
      }

      /* Execute the buffer and save copied vertices.
       */
      if (exec->vtx.vert_count)
         vbo_exec_vtx_flush(exec);
      else {
         /* No vertices were emitted; just drop the queued primitives. */
         exec->vtx.prim_count = 0;
         exec->vtx.copied.nr = 0;
      }

      /* Emit a glBegin to start the new list.
       */
      assert(exec->vtx.prim_count == 0);

      if (_mesa_inside_begin_end(exec->ctx)) {
         /* Re-open the interrupted primitive at the start of the new buffer. */
         exec->vtx.prim[0].mode = exec->ctx->Driver.CurrentExecPrimitive;
         exec->vtx.prim[0].begin = 0;
         exec->vtx.prim[0].end = 0;
         exec->vtx.prim[0].start = 0;
         exec->vtx.prim[0].count = 0;
         exec->vtx.prim_count++;

         /* If every vertex of the old section carries over, the new section
          * inherits the original begin flag (the section wasn't really split).
          */
         if (exec->vtx.copied.nr == last_count)
            exec->vtx.prim[0].begin = last_begin;
      }
   }
}
125
126
127 /**
128 * Deal with buffer wrapping where provoked by the vertex buffer
129 * filling up, as opposed to upgrade_vertex().
130 */
static void
vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
{
   unsigned numComponents;

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (!exec->vtx.buffer_ptr) {
      /* probably ran out of memory earlier when allocating the VBO */
      return;
   }

   /* Copy stored vertices to start of new list.
    */
   assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);

   /* numComponents counts fi_type words, not vertices. */
   numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
   memcpy(exec->vtx.buffer_ptr,
          exec->vtx.copied.buffer,
          numComponents * sizeof(fi_type));
   exec->vtx.buffer_ptr += numComponents;
   exec->vtx.vert_count += exec->vtx.copied.nr;

   exec->vtx.copied.nr = 0;
}
159
160
161 /**
162 * Copy the active vertex's values to the ctx->Current fields.
163 */
static void
vbo_exec_copy_to_current(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   /* Skip the position attribute: ctx->Current doesn't track it. */
   GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));

   while (enabled) {
      const int i = u_bit_scan64(&enabled);

      /* Note: the exec->vtx.current[i] pointers point into the
       * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
       */
      GLfloat *current = (GLfloat *)vbo->current[i].Ptr;
      fi_type tmp[8]; /* space for doubles */
      int dmul = 1;

      /* 64-bit component types occupy two 32-bit words each. */
      if (exec->vtx.attr[i].type == GL_DOUBLE ||
          exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB)
         dmul = 2;

      assert(exec->vtx.attr[i].size);

      if (exec->vtx.attr[i].type == GL_DOUBLE ||
          exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB) {
         memset(tmp, 0, sizeof(tmp));
         memcpy(tmp, exec->vtx.attrptr[i], exec->vtx.attr[i].size * sizeof(GLfloat));
      } else {
         /* Expand to 4 components, filling defaults (0,0,0,1-style). */
         COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
                                     exec->vtx.attr[i].size,
                                     exec->vtx.attrptr[i],
                                     exec->vtx.attr[i].type);
      }

      /* Only touch ctx state (and dirty flags) when something changed. */
      if (exec->vtx.attr[i].type != vbo->current[i].Format.Type ||
          memcmp(current, tmp, 4 * sizeof(GLfloat) * dmul) != 0) {
         memcpy(current, tmp, 4 * sizeof(GLfloat) * dmul);

         /* Given that we explicitly state size here, there is no need
          * for the COPY_CLEAN above, could just copy 16 bytes and be
          * done.  The only problem is when Mesa accesses ctx->Current
          * directly.
          */
         /* Size here is in components - not bytes */
         vbo_set_vertex_format(&vbo->current[i].Format,
                               exec->vtx.attr[i].size / dmul,
                               exec->vtx.attr[i].type);

         /* This triggers rather too much recalculation of Mesa state
          * that doesn't get used (eg light positions).
          */
         if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT &&
             i <= VBO_ATTRIB_MAT_BACK_INDEXES)
            ctx->NewState |= _NEW_LIGHT;

         ctx->NewState |= _NEW_CURRENT_ATTRIB;
      }
   }

   /* Colormaterial -- this kindof sucks.
    */
   if (ctx->Light.ColorMaterialEnabled &&
       exec->vtx.attr[VBO_ATTRIB_COLOR0].size) {
      _mesa_update_color_material(ctx,
                                  ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
   }
}
231
232
233 /**
234 * Flush existing data, set new attrib size, replay copied vertices.
235 * This is called when we transition from a small vertex attribute size
236 * to a larger one. Ex: glTexCoord2f -> glTexCoord4f.
237 * We need to go back over the previous 2-component texcoords and insert
238 * zero and one values.
239 * \param attr VBO_ATTRIB_x vertex attribute value
240 */
static void
vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
                             GLuint attr, GLuint newSize)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   const GLint lastcount = exec->vtx.vert_count;
   fi_type *old_attrptr[VBO_ATTRIB_MAX];
   const GLuint old_vtx_size_no_pos = exec->vtx.vertex_size_no_pos;
   const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
   const GLuint oldSize = exec->vtx.attr[attr].size;
   GLuint i;

   assert(attr < VBO_ATTRIB_MAX);

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (unlikely(exec->vtx.copied.nr)) {
      /* We're in the middle of a primitive, keep the old vertex
       * format around to be able to translate the copied vertices to
       * the new format.
       */
      memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
   }

   /* Heuristic: Attempt to isolate attributes received outside
    * begin/end so that they don't bloat the vertices.
    */
   if (!_mesa_inside_begin_end(ctx) &&
       !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
      vbo_exec_copy_to_current(exec);
      vbo_reset_all_attr(exec);
   }

   /* Fix up sizes:
    */
   exec->vtx.attr[attr].size = newSize;
   exec->vtx.vertex_size += newSize - oldSize;
   exec->vtx.vertex_size_no_pos = exec->vtx.vertex_size - exec->vtx.attr[0].size;
   exec->vtx.max_vert = vbo_compute_max_verts(exec);
   exec->vtx.vert_count = 0;
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   exec->vtx.enabled |= BITFIELD64_BIT(attr);

   if (attr != 0) {
      if (unlikely(oldSize)) {
         /* The attribute already existed in the layout: resize it in place
          * and shift any attributes stored after it.
          */
         unsigned offset = exec->vtx.attrptr[attr] - exec->vtx.vertex;

         /* If there are attribs after the resized attrib... */
         if (offset + oldSize < old_vtx_size_no_pos) {
            int size_diff = newSize - oldSize;
            fi_type *old_first = exec->vtx.attrptr[attr] + oldSize;
            fi_type *new_first = exec->vtx.attrptr[attr] + newSize;
            fi_type *old_last = exec->vtx.vertex + old_vtx_size_no_pos - 1;
            fi_type *new_last = exec->vtx.vertex + exec->vtx.vertex_size_no_pos - 1;

            if (size_diff < 0) {
               /* Decreasing the size: Copy from first to last to move
                * elements to the left.
                */
               fi_type *old_end = old_last + 1;
               fi_type *old = old_first;
               fi_type *new = new_first;

               do {
                  *new++ = *old++;
               } while (old != old_end);
            } else {
               /* Increasing the size: Copy from last to first to move
                * elements to the right.
                */
               fi_type *old_end = old_first - 1;
               fi_type *old = old_last;
               fi_type *new = new_last;

               do {
                  *new-- = *old--;
               } while (old != old_end);
            }

            /* Update pointers to attribs, because we moved them. */
            GLbitfield64 enabled = exec->vtx.enabled &
                                   ~BITFIELD64_BIT(VBO_ATTRIB_POS) &
                                   ~BITFIELD64_BIT(attr);
            while (enabled) {
               unsigned i = u_bit_scan64(&enabled);

               if (exec->vtx.attrptr[i] > exec->vtx.attrptr[attr])
                  exec->vtx.attrptr[i] += size_diff;
            }
         }
      } else {
         /* Just have to append the new attribute at the end */
         exec->vtx.attrptr[attr] = exec->vtx.vertex +
           exec->vtx.vertex_size_no_pos - newSize;
      }
   }

   /* The position is always last. */
   exec->vtx.attrptr[0] = exec->vtx.vertex + exec->vtx.vertex_size_no_pos;

   /* Replay stored vertices to translate them
    * to new format here.
    *
    * -- No need to replay - just copy piecewise
    */
   if (unlikely(exec->vtx.copied.nr)) {
      fi_type *data = exec->vtx.copied.buffer;
      fi_type *dest = exec->vtx.buffer_ptr;

      assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);

      for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
         GLbitfield64 enabled = exec->vtx.enabled;
         while (enabled) {
            const int j = u_bit_scan64(&enabled);
            GLuint sz = exec->vtx.attr[j].size;
            GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
            GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;

            assert(sz);

            if (j == attr) {
               if (oldSize) {
                  /* Widen the old components, filling in default values. */
                  fi_type tmp[4];
                  COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
                                              data + old_offset,
                                              exec->vtx.attr[j].type);
                  COPY_SZ_4V(dest + new_offset, newSize, tmp);
               } else {
                  /* Attribute didn't exist before: take the current value. */
                  fi_type *current = (fi_type *)vbo->current[j].Ptr;
                  COPY_SZ_4V(dest + new_offset, sz, current);
               }
            }
            else {
               /* Untouched attribute: straight copy at its new offset. */
               COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
            }
         }

         data += old_vtx_size;
         dest += exec->vtx.vertex_size;
      }

      exec->vtx.buffer_ptr = dest;
      exec->vtx.vert_count += exec->vtx.copied.nr;
      exec->vtx.copied.nr = 0;
   }
}
392
393
394 /**
395 * This is when a vertex attribute transitions to a different size.
396 * For example, we saw a bunch of glTexCoord2f() calls and now we got a
397 * glTexCoord4f() call. We promote the array from size=2 to size=4.
398 * \param newSize size of new vertex (number of 32-bit words).
399 * \param attr VBO_ATTRIB_x vertex attribute value
400 */
static void
vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
                      GLuint newSize, GLenum newType)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   assert(attr < VBO_ATTRIB_MAX);

   if (newSize > exec->vtx.attr[attr].size ||
       newType != exec->vtx.attr[attr].type) {
      /* New size is larger (or type changed).  Need to flush existing
       * vertices and get an enlarged / retyped vertex format.
       */
      vbo_exec_wrap_upgrade_vertex(exec, attr, newSize);
   }
   else if (newSize < exec->vtx.attr[attr].active_size) {
      GLuint i;
      const fi_type *id =
         vbo_get_default_vals_as_union(exec->vtx.attr[attr].type);

      /* New size is smaller - just need to fill in some
       * zeros.  Don't need to flush or wrap.
       */
      /* Reset trailing components to their default (identity) values. */
      for (i = newSize; i <= exec->vtx.attr[attr].size; i++)
         exec->vtx.attrptr[attr][i-1] = id[i-1];
   }

   exec->vtx.attr[attr].active_size = newSize;
   exec->vtx.attr[attr].type = newType;

   /* Does setting NeedFlush belong here?  Necessitates resetting
    * vtxfmt on each flush (otherwise flags won't get reset
    * afterwards).
    */
   if (attr == 0)
      ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
}
438
439
440 /**
441 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
442 * It depends on a few things, including whether we're inside or outside
443 * of glBegin/glEnd.
444 */
445 static inline bool
446 is_vertex_position(const struct gl_context *ctx, GLuint index)
447 {
448 return (index == 0 &&
449 _mesa_attr_zero_aliases_vertex(ctx) &&
450 _mesa_inside_begin_end(ctx));
451 }
452
/* Write a 64-bit value into a 32-bit pointer by preserving endianness.
 * The destination pointer is advanced past both 32-bit halves, so the
 * halves land in memory order regardless of host byte order.
 */
#if UTIL_ARCH_LITTLE_ENDIAN
#define SET_64BIT(dst32, u64) do { \
      *(dst32)++ = (u64); \
      *(dst32)++ = (uint64_t)(u64) >> 32; \
   } while (0)
#else
#define SET_64BIT(dst32, u64) do { \
      *(dst32)++ = (uint64_t)(u64) >> 32; \
      *(dst32)++ = (u64); \
   } while (0)
#endif
465
466
/**
 * This macro is used to implement all the glVertex, glColor, glTexCoord,
 * glVertexAttrib, etc functions.
 * Non-position attributes are latched in exec->vtx.attrptr[]; a glVertex
 * call (A == 0) copies all latched attributes plus the position into the
 * vertex buffer and emits one vertex.
 * \param A  VBO_ATTRIB_x attribute index
 * \param N  attribute size (1..4)
 * \param T  type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
 * \param C  cast type (uint32_t or uint64_t)
 * \param V0, V1, V2, V3  attribute value
 */
#define ATTR_UNION(A, N, T, C, V0, V1, V2, V3)                          \
do {                                                                    \
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;             \
   int sz = (sizeof(C) / sizeof(GLfloat));                              \
                                                                        \
   assert(sz == 1 || sz == 2);                                          \
                                                                        \
   /* check if attribute size or type is changing */                    \
   if (unlikely(exec->vtx.attr[A].active_size != N * sz ||              \
                exec->vtx.attr[A].type != T)) {                         \
      vbo_exec_fixup_vertex(ctx, A, N * sz, T);                         \
   }                                                                    \
                                                                        \
   /* store a copy of the attribute in exec except for glVertex */      \
   if ((A) != 0) {                                                      \
      C *dest = (C *)exec->vtx.attrptr[A];                              \
      if (N>0) dest[0] = V0;                                            \
      if (N>1) dest[1] = V1;                                            \
      if (N>2) dest[2] = V2;                                            \
      if (N>3) dest[3] = V3;                                            \
      assert(exec->vtx.attr[A].type == T);                              \
   }                                                                    \
                                                                        \
   if ((A) == 0) {                                                      \
      /* This is a glVertex call */                                     \
      uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr;                 \
      uint32_t *src = (uint32_t *)exec->vtx.vertex;                     \
      unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos;       \
                                                                        \
      /* Copy over attributes from exec. */                             \
      for (unsigned i = 0; i < vertex_size_no_pos; i++)                 \
         *dst++ = *src++;                                               \
                                                                        \
      /* Store the position, which is always last and can have 32 or */ \
      /* 64 bits per channel. */                                        \
      if (sizeof(C) == 4) {                                             \
         if (N > 0) *dst++ = V0;                                        \
         if (N > 1) *dst++ = V1;                                        \
         if (N > 2) *dst++ = V2;                                        \
         if (N > 3) *dst++ = V3;                                        \
      } else {                                                          \
         /* 64 bits: dst can be unaligned, so copy each 4-byte word */  \
         /* separately */                                               \
         if (N > 0) SET_64BIT(dst, V0);                                 \
         if (N > 1) SET_64BIT(dst, V1);                                 \
         if (N > 2) SET_64BIT(dst, V2);                                 \
         if (N > 3) SET_64BIT(dst, V3);                                 \
      }                                                                 \
                                                                        \
      /* dst now points at the beginning of the next vertex */          \
      exec->vtx.buffer_ptr = (fi_type*)dst;                             \
                                                                        \
      /* Don't set FLUSH_UPDATE_CURRENT because */                      \
      /* Current.Attrib[VBO_ATTRIB_POS] is never used. */               \
                                                                        \
      if (unlikely(++exec->vtx.vert_count >= exec->vtx.max_vert))       \
         vbo_exec_vtx_wrap(exec);                                       \
   } else {                                                             \
      /* we now have accumulated per-vertex attributes */               \
      ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT;                    \
   }                                                                    \
} while (0)
538
539
/* Configure and instantiate the shared attribute-function template:
 * TAG() prefixes every generated entry point with "vbo_exec_".
 */
#undef ERROR
#define ERROR(err) _mesa_error(ctx, err, __func__)
#define TAG(x) vbo_exec_##x

#include "vbo_attrib_tmp.h"
545
546
547
548 /**
549 * Execute a glMaterial call. Note that if GL_COLOR_MATERIAL is enabled,
550 * this may be a (partial) no-op.
551 */
static void GLAPIENTRY
vbo_exec_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   GLbitfield updateMats;
   GET_CURRENT_CONTEXT(ctx);

   /* This function should be a no-op when it tries to update material
    * attributes which are currently tracking glColor via glColorMaterial.
    * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
    * indicating which material attributes can actually be updated below.
    */
   if (ctx->Light.ColorMaterialEnabled) {
      updateMats = ~ctx->Light._ColorMaterialBitmask;
   }
   else {
      /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
      updateMats = ALL_MATERIAL_BITS;
   }

   /* Restrict the mask to the requested face.  Only compat GL accepts
    * single-sided updates; other APIs require GL_FRONT_AND_BACK.
    */
   if (ctx->API == API_OPENGL_COMPAT && face == GL_FRONT) {
      updateMats &= FRONT_MATERIAL_BITS;
   }
   else if (ctx->API == API_OPENGL_COMPAT && face == GL_BACK) {
      updateMats &= BACK_MATERIAL_BITS;
   }
   else if (face != GL_FRONT_AND_BACK) {
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
      return;
   }

   switch (pname) {
   case GL_EMISSION:
      if (updateMats & MAT_BIT_FRONT_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
      if (updateMats & MAT_BIT_BACK_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
      break;
   case GL_AMBIENT:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      break;
   case GL_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   case GL_SPECULAR:
      if (updateMats & MAT_BIT_FRONT_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
      if (updateMats & MAT_BIT_BACK_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
      break;
   case GL_SHININESS:
      /* Shininess is the only range-checked material parameter. */
      if (*params < 0 || *params > ctx->Const.MaxShininess) {
         _mesa_error(ctx, GL_INVALID_VALUE,
                     "glMaterial(invalid shininess: %f out range [0, %f])",
                     *params, ctx->Const.MaxShininess);
         return;
      }
      if (updateMats & MAT_BIT_FRONT_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
      if (updateMats & MAT_BIT_BACK_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
      break;
   case GL_COLOR_INDEXES:
      /* Color-index materials exist only in compatibility contexts. */
      if (ctx->API != API_OPENGL_COMPAT) {
         _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
         return;
      }
      if (updateMats & MAT_BIT_FRONT_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
      if (updateMats & MAT_BIT_BACK_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
      break;
   case GL_AMBIENT_AND_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   default:
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
      return;
   }
}
644
645
646 /**
647 * Flush (draw) vertices.
648 *
649 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
650 */
651 static void
652 vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, unsigned flags)
653 {
654 struct gl_context *ctx = exec->ctx;
655
656 if (flags & FLUSH_STORED_VERTICES) {
657 if (exec->vtx.vert_count) {
658 vbo_exec_vtx_flush(exec);
659 }
660
661 if (exec->vtx.vertex_size) {
662 vbo_exec_copy_to_current(exec);
663 vbo_reset_all_attr(exec);
664 }
665
666 /* All done. */
667 ctx->Driver.NeedFlush = 0;
668 } else {
669 assert(flags == FLUSH_UPDATE_CURRENT);
670
671 /* Note that the vertex size is unchanged.
672 * (vbo_reset_all_attr isn't called)
673 */
674 vbo_exec_copy_to_current(exec);
675
676 /* Only FLUSH_UPDATE_CURRENT is done. */
677 ctx->Driver.NeedFlush = ~FLUSH_UPDATE_CURRENT;
678 }
679 }
680
681
/**
 * glEvalCoord1f: evaluate the active 1-D maps at parameter u and emit the
 * resulting vertex/attributes.
 */
static void GLAPIENTRY
vbo_exec_EvalCoord1f(GLfloat u)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure each mapped attribute slot has the size the map needs. */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map1[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map1[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
      }
   }

   /* Save the current vertex so the evaluator doesn't clobber it;
    * copied.buffer is borrowed as scratch space here.
    */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord1f(exec, u);

   /* Restore the saved vertex. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
708
709
/**
 * glEvalCoord2f: evaluate the active 2-D maps at (u, v) and emit the
 * resulting vertex/attributes.
 */
static void GLAPIENTRY
vbo_exec_EvalCoord2f(GLfloat u, GLfloat v)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure each mapped attribute slot has the size the map needs. */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map2[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map2[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
      }

      /* GL_AUTO_NORMAL generates a 3-component normal. */
      if (ctx->Eval.AutoNormal)
         if (exec->vtx.attr[VBO_ATTRIB_NORMAL].active_size != 3)
            vbo_exec_fixup_vertex(ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT);
   }

   /* Save the current vertex so the evaluator doesn't clobber it;
    * copied.buffer is borrowed as scratch space here.
    */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord2f(exec, u, v);

   /* Restore the saved vertex. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
740
741
742 static void GLAPIENTRY
743 vbo_exec_EvalCoord1fv(const GLfloat *u)
744 {
745 vbo_exec_EvalCoord1f(u[0]);
746 }
747
748
749 static void GLAPIENTRY
750 vbo_exec_EvalCoord2fv(const GLfloat *u)
751 {
752 vbo_exec_EvalCoord2f(u[0], u[1]);
753 }
754
755
756 static void GLAPIENTRY
757 vbo_exec_EvalPoint1(GLint i)
758 {
759 GET_CURRENT_CONTEXT(ctx);
760 GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
761 (GLfloat) ctx->Eval.MapGrid1un);
762 GLfloat u = i * du + ctx->Eval.MapGrid1u1;
763
764 vbo_exec_EvalCoord1f(u);
765 }
766
767
768 static void GLAPIENTRY
769 vbo_exec_EvalPoint2(GLint i, GLint j)
770 {
771 GET_CURRENT_CONTEXT(ctx);
772 GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
773 (GLfloat) ctx->Eval.MapGrid2un);
774 GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
775 (GLfloat) ctx->Eval.MapGrid2vn);
776 GLfloat u = i * du + ctx->Eval.MapGrid2u1;
777 GLfloat v = j * dv + ctx->Eval.MapGrid2v1;
778
779 vbo_exec_EvalCoord2f(u, v);
780 }
781
782
783 /**
784 * Called via glBegin.
785 */
static void GLAPIENTRY
vbo_exec_Begin(GLenum mode)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_context *vbo = vbo_context(ctx);
   struct vbo_exec_context *exec = &vbo->exec;
   int i;

   /* Nested glBegin is an error. */
   if (_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
      return;
   }

   if (!_mesa_valid_prim_mode(ctx, mode, "glBegin")) {
      return;
   }

   if (!_mesa_valid_to_render(ctx, "glBegin")) {
      return;
   }

   /* Heuristic: attempt to isolate attributes occurring outside
    * begin/end pairs.
    *
    * Use FLUSH_STORED_VERTICES, because it updates current attribs and
    * sets vertex_size to 0.  (FLUSH_UPDATE_CURRENT doesn't change vertex_size)
    */
   if (exec->vtx.vertex_size && !exec->vtx.attr[VBO_ATTRIB_POS].size)
      vbo_exec_FlushVertices_internal(exec, FLUSH_STORED_VERTICES);

   /* Open a new primitive record. */
   i = exec->vtx.prim_count++;
   exec->vtx.prim[i].mode = mode;
   exec->vtx.prim[i].begin = 1;
   exec->vtx.prim[i].end = 0;
   exec->vtx.prim[i].indexed = 0;
   exec->vtx.prim[i].pad = 0;
   exec->vtx.prim[i].start = exec->vtx.vert_count;
   exec->vtx.prim[i].count = 0;
   exec->vtx.prim[i].num_instances = 1;
   exec->vtx.prim[i].base_instance = 0;

   ctx->Driver.CurrentExecPrimitive = mode;

   /* Switch to the begin/end dispatch table, which restricts the set of
    * legal GL calls until glEnd.
    */
   ctx->Exec = ctx->BeginEnd;

   /* We may have been called from a display list, in which case we should
    * leave dlist.c's dispatch table in place.
    */
   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->OutsideBeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   } else {
      assert(ctx->CurrentClientDispatch == ctx->Save);
   }
}
843
844
845 /**
846 * Try to merge / concatenate the two most recent VBO primitives.
847 */
static void
try_vbo_merge(struct vbo_exec_context *exec)
{
   struct _mesa_prim *cur = &exec->vtx.prim[exec->vtx.prim_count - 1];

   assert(exec->vtx.prim_count >= 1);

   /* e.g. rewrite a degenerate primitive into a cheaper equivalent form. */
   vbo_try_prim_conversion(cur);

   if (exec->vtx.prim_count >= 2) {
      struct _mesa_prim *prev = &exec->vtx.prim[exec->vtx.prim_count - 2];
      assert(prev == cur - 1);

      /* Merging only applies to fully closed (begin+end) primitives. */
      if (vbo_can_merge_prims(prev, cur)) {
         assert(cur->begin);
         assert(cur->end);
         assert(prev->begin);
         assert(prev->end);
         vbo_merge_prims(prev, cur);
         exec->vtx.prim_count--;  /* drop the last primitive */
      }
   }
}
871
872
873 /**
874 * Called via glEnd.
875 */
static void GLAPIENTRY
vbo_exec_End(void)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   /* glEnd without a matching glBegin is an error. */
   if (!_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
      return;
   }

   /* Restore the outside-begin/end dispatch table. */
   ctx->Exec = ctx->OutsideBeginEnd;

   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->BeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   }

   if (exec->vtx.prim_count > 0) {
      /* close off current primitive */
      struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
      unsigned count = exec->vtx.vert_count - last_prim->start;

      last_prim->end = 1;
      last_prim->count = count;

      if (count)
         ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;

      /* Special handling for GL_LINE_LOOP */
      if (last_prim->mode == GL_LINE_LOOP && last_prim->begin == 0) {
         /* We're finishing drawing a line loop.  Append 0th vertex onto
          * end of vertex buffer so we can draw it as a line strip.
          */
         const fi_type *src = exec->vtx.buffer_map +
            last_prim->start * exec->vtx.vertex_size;
         fi_type *dst = exec->vtx.buffer_map +
            exec->vtx.vert_count * exec->vtx.vertex_size;

         /* copy 0th vertex to end of buffer */
         memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));

         last_prim->start++;  /* skip vertex0 */
         /* note that last_prim->count stays unchanged */
         last_prim->mode = GL_LINE_STRIP;

         /* Increment the vertex count so the next primitive doesn't
          * overwrite the last vertex which we just added.
          */
         exec->vtx.vert_count++;
         exec->vtx.buffer_ptr += exec->vtx.vertex_size;
      }

      try_vbo_merge(exec);
   }

   ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;

   /* Flush if the primitive array is full. */
   if (exec->vtx.prim_count == VBO_MAX_PRIM)
      vbo_exec_vtx_flush(exec);

   if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
      _mesa_flush(ctx);
   }
}
943
944
945 /**
946 * Called via glPrimitiveRestartNV()
947 */
948 static void GLAPIENTRY
949 vbo_exec_PrimitiveRestartNV(void)
950 {
951 GLenum curPrim;
952 GET_CURRENT_CONTEXT(ctx);
953
954 curPrim = ctx->Driver.CurrentExecPrimitive;
955
956 if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
957 _mesa_error(ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV");
958 }
959 else {
960 vbo_exec_End();
961 vbo_exec_Begin(curPrim);
962 }
963 }
964
965
/**
 * Populate exec->vtxfmt with the immediate-mode entry points.  The NAME_*
 * macros tell the vbo_init_tmp.h template which prefixed functions to
 * plug into each dispatch slot.
 */
static void
vbo_exec_vtxfmt_init(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   GLvertexformat *vfmt = &exec->vtxfmt;

#define NAME_AE(x) _ae_##x
#define NAME_CALLLIST(x) _mesa_##x
#define NAME(x) vbo_exec_##x
#define NAME_ES(x) _es_##x

#include "vbo_init_tmp.h"
}
979
980
/**
 * Reset every enabled vertex attribute to its initial (empty) state.
 * Note: u_bit_scan64 consumes exec->vtx.enabled destructively, so the
 * enabled mask is zero when this returns.
 */
static void
vbo_reset_all_attr(struct vbo_exec_context *exec)
{
   while (exec->vtx.enabled) {
      const int i = u_bit_scan64(&exec->vtx.enabled);

      /* Reset the vertex attribute by setting its size to zero. */
      exec->vtx.attr[i].size = 0;
      exec->vtx.attr[i].type = GL_FLOAT;
      exec->vtx.attr[i].active_size = 0;
      exec->vtx.attrptr[i] = NULL;
   }

   exec->vtx.vertex_size = 0;
}
996
997
/**
 * One-time initialization of the immediate-mode vertex machinery.
 *
 * \param use_buffer_objects  if true, back the vertex stream with a real
 *        (driver-allocated) buffer object; otherwise use malloc'd memory.
 */
void
vbo_exec_vtx_init(struct vbo_exec_context *exec, bool use_buffer_objects)
{
   struct gl_context *ctx = exec->ctx;

   if (use_buffer_objects) {
      /* Use buffer objects for immediate mode. */
      struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

      exec->vtx.bufferobj = ctx->Driver.NewBufferObject(ctx, IMM_BUFFER_NAME);

      /* Map the buffer. */
      vbo_exec_vtx_map(exec);
      assert(exec->vtx.buffer_ptr);
   } else {
      /* Use allocated memory for immediate mode. */
      _mesa_reference_buffer_object(ctx,
                                    &exec->vtx.bufferobj,
                                    ctx->Shared->NullBufferObj);

      exec->vtx.buffer_map = _mesa_align_malloc(VBO_VERT_BUFFER_SIZE, 64);
      exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   }

   vbo_exec_vtxfmt_init(exec);
   _mesa_noop_vtxfmt_init(ctx, &exec->vtxfmt_noop);

   /* Mark everything enabled so vbo_reset_all_attr clears every slot. */
   exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
   vbo_reset_all_attr(exec);
}
1028
1029
/**
 * Tear down the immediate-mode vertex machinery: free malloc'd buffer
 * memory (Name == 0 means no real buffer object) and release the buffer
 * object reference, unmapping it first if necessary.
 */
void
vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
{
   /* using a real VBO for vertex data */
   struct gl_context *ctx = exec->ctx;

   /* True VBOs should already be unmapped
    */
   if (exec->vtx.buffer_map) {
      assert(exec->vtx.bufferobj->Name == 0 ||
             exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
      if (exec->vtx.bufferobj->Name == 0) {
         /* malloc'd path: we own the memory directly. */
         _mesa_align_free(exec->vtx.buffer_map);
         exec->vtx.buffer_map = NULL;
         exec->vtx.buffer_ptr = NULL;
      }
   }

   /* Free the vertex buffer.  Unmap first if needed.
    */
   if (_mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
      ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
   }
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
}
1055
1056
1057 /**
1058 * If inside glBegin()/glEnd(), it should assert(0). Otherwise, if
1059 * FLUSH_STORED_VERTICES bit in \p flags is set flushes any buffered
1060 * vertices, if FLUSH_UPDATE_CURRENT bit is set updates
1061 * __struct gl_contextRec::Current and gl_light_attrib::Material
1062 *
1063 * Note that the default T&L engine never clears the
1064 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
1065 *
1066 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
1067 */
void
vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

#ifndef NDEBUG
   /* debug check: make sure we don't get called recursively */
   exec->flush_call_depth++;
   assert(exec->flush_call_depth == 1);
#endif

   if (_mesa_inside_begin_end(ctx)) {
      /* We've had glBegin but not glEnd!  Flushing mid-primitive would
       * corrupt it, so bail out.
       */
#ifndef NDEBUG
      exec->flush_call_depth--;
      assert(exec->flush_call_depth == 0);
#endif
      return;
   }

   /* Flush (draw). */
   vbo_exec_FlushVertices_internal(exec, flags);

#ifndef NDEBUG
   exec->flush_call_depth--;
   assert(exec->flush_call_depth == 0);
#endif
}
1096
1097
1098 void GLAPIENTRY
1099 _es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
1100 {
1101 vbo_exec_Color4f(r, g, b, a);
1102 }
1103
1104
1105 void GLAPIENTRY
1106 _es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
1107 {
1108 vbo_exec_Normal3f(x, y, z);
1109 }
1110
1111
1112 void GLAPIENTRY
1113 _es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
1114 {
1115 vbo_exec_MultiTexCoord4f(target, s, t, r, q);
1116 }
1117
1118
1119 void GLAPIENTRY
1120 _es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
1121 {
1122 vbo_exec_Materialfv(face, pname, params);
1123 }
1124
1125
1126 void GLAPIENTRY
1127 _es_Materialf(GLenum face, GLenum pname, GLfloat param)
1128 {
1129 GLfloat p[4];
1130 p[0] = param;
1131 p[1] = p[2] = p[3] = 0.0F;
1132 vbo_exec_Materialfv(face, pname, p);
1133 }
1134
1135
1136 /**
1137 * A special version of glVertexAttrib4f that does not treat index 0 as
1138 * VBO_ATTRIB_POS.
1139 */
1140 static void
1141 VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1142 {
1143 GET_CURRENT_CONTEXT(ctx);
1144 if (index < MAX_VERTEX_GENERIC_ATTRIBS)
1145 ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
1146 else
1147 ERROR(GL_INVALID_VALUE);
1148 }
1149
1150 void GLAPIENTRY
1151 _es_VertexAttrib4f(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1152 {
1153 VertexAttrib4f_nopos(index, x, y, z, w);
1154 }
1155
1156
1157 void GLAPIENTRY
1158 _es_VertexAttrib1f(GLuint indx, GLfloat x)
1159 {
1160 VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
1161 }
1162
1163
1164 void GLAPIENTRY
1165 _es_VertexAttrib1fv(GLuint indx, const GLfloat* values)
1166 {
1167 VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
1168 }
1169
1170
1171 void GLAPIENTRY
1172 _es_VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y)
1173 {
1174 VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
1175 }
1176
1177
1178 void GLAPIENTRY
1179 _es_VertexAttrib2fv(GLuint indx, const GLfloat* values)
1180 {
1181 VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
1182 }
1183
1184
1185 void GLAPIENTRY
1186 _es_VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
1187 {
1188 VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
1189 }
1190
1191
1192 void GLAPIENTRY
1193 _es_VertexAttrib3fv(GLuint indx, const GLfloat* values)
1194 {
1195 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
1196 }
1197
1198
1199 void GLAPIENTRY
1200 _es_VertexAttrib4fv(GLuint indx, const GLfloat* values)
1201 {
1202 VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
1203 }