Added a few more stubs so that control reaches DestroyDevice().
[mesa.git] / src / mesa / vbo / vbo_exec_api.c
1 /**************************************************************************
2
3 Copyright 2002-2008 VMware, Inc.
4
5 All Rights Reserved.
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 on the rights to use, copy, modify, merge, publish, distribute, sub
11 license, and/or sell copies of the Software, and to permit persons to whom
12 the Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice (including the next
15 paragraph) shall be included in all copies or substantial portions of the
16 Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **************************************************************************/
27
28 /*
29 * Authors:
30 * Keith Whitwell <keithw@vmware.com>
31 */
32
33 #include "main/glheader.h"
34 #include "main/bufferobj.h"
35 #include "main/context.h"
36 #include "main/macros.h"
37 #include "main/vtxfmt.h"
38 #include "main/dlist.h"
39 #include "main/eval.h"
40 #include "main/state.h"
41 #include "main/light.h"
42 #include "main/api_arrayelt.h"
43 #include "main/draw_validate.h"
44 #include "main/dispatch.h"
45 #include "util/bitscan.h"
46 #include "util/u_memory.h"
47
48 #include "vbo_noop.h"
49 #include "vbo_private.h"
50
51
52 /** ID/name for immediate-mode VBO */
53 #define IMM_BUFFER_NAME 0xaabbccdd
54
55
56 static void
57 vbo_reset_all_attr(struct vbo_exec_context *exec);
58
59
/**
 * Close off the last primitive, execute the buffer, restart the
 * primitive.  This is called when we fill a vertex buffer before
 * hitting glEnd.
 */
static void
vbo_exec_wrap_buffers(struct vbo_exec_context *exec)
{
   if (exec->vtx.prim_count == 0) {
      /* No primitives buffered: simply rewind to the start of the store. */
      exec->vtx.copied.nr = 0;
      exec->vtx.vert_count = 0;
      exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   }
   else {
      struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
      const GLuint last_begin = last_prim->begin;
      GLuint last_count;

      /* If a glBegin is open, the last prim's count is whatever has been
       * emitted since its start vertex.
       */
      if (_mesa_inside_begin_end(exec->ctx)) {
         last_prim->count = exec->vtx.vert_count - last_prim->start;
      }

      last_count = last_prim->count;

      /* Special handling for wrapping GL_LINE_LOOP */
      if (last_prim->mode == GL_LINE_LOOP &&
          last_count > 0 &&
          !last_prim->end) {
         /* draw this section of the incomplete line loop as a line strip */
         last_prim->mode = GL_LINE_STRIP;
         if (!last_prim->begin) {
            /* This is not the first section of the line loop, so don't
             * draw the 0th vertex.  We're saving it until we draw the
             * very last section of the loop.
             */
            last_prim->start++;
            last_prim->count--;
         }
      }

      /* Execute the buffer and save copied vertices.
       */
      if (exec->vtx.vert_count)
         vbo_exec_vtx_flush(exec);
      else {
         /* Nothing to draw: just discard the bookkeeping. */
         exec->vtx.prim_count = 0;
         exec->vtx.copied.nr = 0;
      }

      /* Emit a glBegin to start the new list.
       */
      assert(exec->vtx.prim_count == 0);

      if (_mesa_inside_begin_end(exec->ctx)) {
         exec->vtx.prim[0].mode = exec->ctx->Driver.CurrentExecPrimitive;
         exec->vtx.prim[0].begin = 0;
         exec->vtx.prim[0].end = 0;
         exec->vtx.prim[0].start = 0;
         exec->vtx.prim[0].count = 0;
         exec->vtx.prim_count++;

         /* If every vertex of the old prim was carried over, the new prim
          * effectively inherits the original glBegin flag.
          */
         if (exec->vtx.copied.nr == last_count)
            exec->vtx.prim[0].begin = last_begin;
      }
   }
}
126
127
/**
 * Deal with buffer wrapping where provoked by the vertex buffer
 * filling up, as opposed to upgrade_vertex().
 */
static void
vbo_exec_vtx_wrap(struct vbo_exec_context *exec)
{
   unsigned numComponents;

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (!exec->vtx.buffer_ptr) {
      /* probably ran out of memory earlier when allocating the VBO */
      return;
   }

   /* Copy stored vertices to start of new list.
    */
   assert(exec->vtx.max_vert - exec->vtx.vert_count > exec->vtx.copied.nr);

   numComponents = exec->vtx.copied.nr * exec->vtx.vertex_size;
   memcpy(exec->vtx.buffer_ptr,
          exec->vtx.copied.buffer,
          numComponents * sizeof(fi_type));
   exec->vtx.buffer_ptr += numComponents;
   exec->vtx.vert_count += exec->vtx.copied.nr;

   exec->vtx.copied.nr = 0;
}
160
161
/**
 * Copy the active vertex's values to the ctx->Current fields.
 * Called when the immediate-mode state must be reflected into the
 * context's current-attribute state (e.g. on flush).
 */
static void
vbo_exec_copy_to_current(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   /* Position (attr 0) is excluded: Current.Attrib[POS] is never used. */
   GLbitfield64 enabled = exec->vtx.enabled & (~BITFIELD64_BIT(VBO_ATTRIB_POS));

   while (enabled) {
      const int i = u_bit_scan64(&enabled);

      /* Note: the exec->vtx.current[i] pointers point into the
       * ctx->Current.Attrib and ctx->Light.Material.Attrib arrays.
       */
      GLfloat *current = (GLfloat *)vbo->current[i].Ptr;
      fi_type tmp[8]; /* space for doubles */
      int dmul = 1;   /* 2 when each component occupies two 32-bit words */

      if (exec->vtx.attr[i].type == GL_DOUBLE ||
          exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB)
         dmul = 2;

      assert(exec->vtx.attr[i].size);

      if (exec->vtx.attr[i].type == GL_DOUBLE ||
          exec->vtx.attr[i].type == GL_UNSIGNED_INT64_ARB) {
         /* 64-bit types: raw copy, zero-fill the unused tail. */
         memset(tmp, 0, sizeof(tmp));
         memcpy(tmp, exec->vtx.attrptr[i], exec->vtx.attr[i].size * sizeof(GLfloat));
      } else {
         /* 32-bit types: expand to a clean vec4 (0,0,0,1 defaults). */
         COPY_CLEAN_4V_TYPE_AS_UNION(tmp,
                                     exec->vtx.attr[i].size,
                                     exec->vtx.attrptr[i],
                                     exec->vtx.attr[i].type);
      }

      /* Only write (and dirty state) when type or value actually changed. */
      if (exec->vtx.attr[i].type != vbo->current[i].Format.Type ||
          memcmp(current, tmp, 4 * sizeof(GLfloat) * dmul) != 0) {
         memcpy(current, tmp, 4 * sizeof(GLfloat) * dmul);

         /* Given that we explicitly state size here, there is no need
          * for the COPY_CLEAN above, could just copy 16 bytes and be
          * done.  The only problem is when Mesa accesses ctx->Current
          * directly.
          */
         /* Size here is in components - not bytes */
         vbo_set_vertex_format(&vbo->current[i].Format,
                               exec->vtx.attr[i].size / dmul,
                               exec->vtx.attr[i].type);

         /* This triggers rather too much recalculation of Mesa state
          * that doesn't get used (eg light positions).
          */
         if (i >= VBO_ATTRIB_MAT_FRONT_AMBIENT &&
             i <= VBO_ATTRIB_MAT_BACK_INDEXES)
            ctx->NewState |= _NEW_LIGHT;

         ctx->NewState |= _NEW_CURRENT_ATTRIB;
      }
   }

   /* Colormaterial -- this kindof sucks.
    */
   if (ctx->Light.ColorMaterialEnabled &&
       exec->vtx.attr[VBO_ATTRIB_COLOR0].size) {
      _mesa_update_color_material(ctx,
                                  ctx->Current.Attrib[VBO_ATTRIB_COLOR0]);
   }
}
232
233
/**
 * Flush existing data, set new attrib size, replay copied vertices.
 * This is called when we transition from a small vertex attribute size
 * to a larger one.  Ex: glTexCoord2f -> glTexCoord4f.
 * We need to go back over the previous 2-component texcoords and insert
 * zero and one values.
 * \param attr     VBO_ATTRIB_x vertex attribute value
 * \param newSize  new number of 32-bit words for the attribute
 * \param newType  new GL component type for the attribute
 */
static void
vbo_exec_wrap_upgrade_vertex(struct vbo_exec_context *exec,
                             GLuint attr, GLuint newSize, GLenum newType)
{
   struct gl_context *ctx = exec->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   const GLint lastcount = exec->vtx.vert_count;
   fi_type *old_attrptr[VBO_ATTRIB_MAX];
   const GLuint old_vtx_size_no_pos = exec->vtx.vertex_size_no_pos;
   const GLuint old_vtx_size = exec->vtx.vertex_size; /* floats per vertex */
   const GLuint oldSize = exec->vtx.attr[attr].size;
   GLuint i;

   assert(attr < VBO_ATTRIB_MAX);

   /* Run pipeline on current vertices, copy wrapped vertices
    * to exec->vtx.copied.
    */
   vbo_exec_wrap_buffers(exec);

   if (unlikely(exec->vtx.copied.nr)) {
      /* We're in the middle of a primitive, keep the old vertex
       * format around to be able to translate the copied vertices to
       * the new format.
       */
      memcpy(old_attrptr, exec->vtx.attrptr, sizeof(old_attrptr));
   }

   /* Heuristic: Attempt to isolate attributes received outside
    * begin/end so that they don't bloat the vertices.
    */
   if (!_mesa_inside_begin_end(ctx) &&
       !oldSize && lastcount > 8 && exec->vtx.vertex_size) {
      vbo_exec_copy_to_current(exec);
      vbo_reset_all_attr(exec);
   }

   /* Fix up sizes:
    */
   exec->vtx.attr[attr].size = newSize;
   exec->vtx.attr[attr].active_size = newSize;
   exec->vtx.attr[attr].type = newType;
   exec->vtx.vertex_size += newSize - oldSize;
   exec->vtx.vertex_size_no_pos = exec->vtx.vertex_size - exec->vtx.attr[0].size;
   exec->vtx.max_vert = vbo_compute_max_verts(exec);
   exec->vtx.vert_count = 0;
   exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   exec->vtx.enabled |= BITFIELD64_BIT(attr);

   if (attr != 0) {
      if (unlikely(oldSize)) {
         /* Attribute already present: resize it in place within the
          * interleaved vertex layout.
          */
         unsigned offset = exec->vtx.attrptr[attr] - exec->vtx.vertex;

         /* If there are attribs after the resized attrib... */
         if (offset + oldSize < old_vtx_size_no_pos) {
            int size_diff = newSize - oldSize;
            fi_type *old_first = exec->vtx.attrptr[attr] + oldSize;
            fi_type *new_first = exec->vtx.attrptr[attr] + newSize;
            fi_type *old_last = exec->vtx.vertex + old_vtx_size_no_pos - 1;
            fi_type *new_last = exec->vtx.vertex + exec->vtx.vertex_size_no_pos - 1;

            if (size_diff < 0) {
               /* Decreasing the size: Copy from first to last to move
                * elements to the left.
                */
               fi_type *old_end = old_last + 1;
               fi_type *old = old_first;
               fi_type *new = new_first;

               do {
                  *new++ = *old++;
               } while (old != old_end);
            } else {
               /* Increasing the size: Copy from last to first to move
                * elements to the right.
                */
               fi_type *old_end = old_first - 1;
               fi_type *old = old_last;
               fi_type *new = new_last;

               do {
                  *new-- = *old--;
               } while (old != old_end);
            }

            /* Update pointers to attribs, because we moved them. */
            GLbitfield64 enabled = exec->vtx.enabled &
                                   ~BITFIELD64_BIT(VBO_ATTRIB_POS) &
                                   ~BITFIELD64_BIT(attr);
            while (enabled) {
               unsigned i = u_bit_scan64(&enabled);

               if (exec->vtx.attrptr[i] > exec->vtx.attrptr[attr])
                  exec->vtx.attrptr[i] += size_diff;
            }
         }
      } else {
         /* Just have to append the new attribute at the end */
         exec->vtx.attrptr[attr] = exec->vtx.vertex +
            exec->vtx.vertex_size_no_pos - newSize;
      }
   }

   /* The position is always last. */
   exec->vtx.attrptr[0] = exec->vtx.vertex + exec->vtx.vertex_size_no_pos;

   /* Replay stored vertices to translate them
    * to new format here.
    *
    * -- No need to replay - just copy piecewise
    */
   if (unlikely(exec->vtx.copied.nr)) {
      fi_type *data = exec->vtx.copied.buffer;
      fi_type *dest = exec->vtx.buffer_ptr;

      assert(exec->vtx.buffer_ptr == exec->vtx.buffer_map);

      for (i = 0 ; i < exec->vtx.copied.nr ; i++) {
         GLbitfield64 enabled = exec->vtx.enabled;
         while (enabled) {
            const int j = u_bit_scan64(&enabled);
            GLuint sz = exec->vtx.attr[j].size;
            GLint old_offset = old_attrptr[j] - exec->vtx.vertex;
            GLint new_offset = exec->vtx.attrptr[j] - exec->vtx.vertex;

            assert(sz);

            if (j == attr) {
               if (oldSize) {
                  /* Expand old data to a clean vec4, then store at the
                   * new (larger) size.
                   */
                  fi_type tmp[4];
                  COPY_CLEAN_4V_TYPE_AS_UNION(tmp, oldSize,
                                              data + old_offset,
                                              exec->vtx.attr[j].type);
                  COPY_SZ_4V(dest + new_offset, newSize, tmp);
               } else {
                  /* Attribute was absent in the old layout: fill from
                   * the context's current value.
                   */
                  fi_type *current = (fi_type *)vbo->current[j].Ptr;
                  COPY_SZ_4V(dest + new_offset, sz, current);
               }
            }
            else {
               /* Unchanged attribute: straight copy at its new offset. */
               COPY_SZ_4V(dest + new_offset, sz, data + old_offset);
            }
         }

         data += old_vtx_size;
         dest += exec->vtx.vertex_size;
      }

      exec->vtx.buffer_ptr = dest;
      exec->vtx.vert_count += exec->vtx.copied.nr;
      exec->vtx.copied.nr = 0;
   }
}
395
396
/**
 * This is when a vertex attribute transitions to a different size.
 * For example, we saw a bunch of glTexCoord2f() calls and now we got a
 * glTexCoord4f() call.  We promote the array from size=2 to size=4.
 * \param newSize size of new vertex (number of 32-bit words).
 * \param attr VBO_ATTRIB_x vertex attribute value
 */
static void
vbo_exec_fixup_vertex(struct gl_context *ctx, GLuint attr,
                      GLuint newSize, GLenum newType)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   assert(attr < VBO_ATTRIB_MAX);

   if (newSize > exec->vtx.attr[attr].size ||
       newType != exec->vtx.attr[attr].type) {
      /* New size is larger (or the type changed).  Need to flush existing
       * vertices and get an enlarged vertex format.
       */
      vbo_exec_wrap_upgrade_vertex(exec, attr, newSize, newType);
   }
   else if (newSize < exec->vtx.attr[attr].active_size) {
      GLuint i;
      const fi_type *id =
         vbo_get_default_vals_as_union(exec->vtx.attr[attr].type);

      /* New size is smaller - just need to fill in some
       * zeros.  Don't need to flush or wrap.
       */
      for (i = newSize; i <= exec->vtx.attr[attr].size; i++)
         exec->vtx.attrptr[attr][i-1] = id[i-1];

      exec->vtx.attr[attr].active_size = newSize;
   }
}
433
434
435 /**
436 * If index=0, does glVertexAttrib*() alias glVertex() to emit a vertex?
437 * It depends on a few things, including whether we're inside or outside
438 * of glBegin/glEnd.
439 */
440 static inline bool
441 is_vertex_position(const struct gl_context *ctx, GLuint index)
442 {
443 return (index == 0 &&
444 _mesa_attr_zero_aliases_vertex(ctx) &&
445 _mesa_inside_begin_end(ctx));
446 }
447
/* Write a 64-bit value into a 32-bit pointer by preserving endianness.
 * The low and high 32-bit halves are stored in memory order so that the
 * combined 8 bytes match the platform's native 64-bit representation.
 * Evaluates (u64) twice and advances dst32 by two words.
 */
#if UTIL_ARCH_LITTLE_ENDIAN
#define SET_64BIT(dst32, u64) do { \
      *(dst32)++ = (u64); \
      *(dst32)++ = (uint64_t)(u64) >> 32; \
   } while (0)
#else
#define SET_64BIT(dst32, u64) do { \
      *(dst32)++ = (uint64_t)(u64) >> 32; \
      *(dst32)++ = (u64); \
   } while (0)
#endif
460
461
/**
 * This macro is used to implement all the glVertex, glColor, glTexCoord,
 * glVertexAttrib, etc functions.
 *
 * For A != 0, the value is stored into the per-vertex scratch area and
 * the vertex format is fixed up lazily when the size/type changes.
 * For A == 0 (glVertex), the accumulated attributes plus the position
 * are emitted into the vertex buffer, wrapping the buffer when full.
 *
 * \param A VBO_ATTRIB_x attribute index
 * \param N attribute size (1..4)
 * \param T type (GL_FLOAT, GL_DOUBLE, GL_INT, GL_UNSIGNED_INT)
 * \param C cast type (uint32_t or uint64_t)
 * \param V0, V1, V2, V3 attribute value
 */
#define ATTR_UNION(A, N, T, C, V0, V1, V2, V3)                          \
   do {                                                                 \
      struct vbo_exec_context *exec = &vbo_context(ctx)->exec;          \
      int sz = (sizeof(C) / sizeof(GLfloat));                           \
                                                                        \
      assert(sz == 1 || sz == 2);                                       \
                                                                        \
      /* store a copy of the attribute in exec except for glVertex */   \
      if ((A) != 0) {                                                   \
         /* Check if attribute size or type is changing. */             \
         if (unlikely(exec->vtx.attr[A].active_size != N * sz ||        \
                      exec->vtx.attr[A].type != T)) {                   \
            vbo_exec_fixup_vertex(ctx, A, N * sz, T);                   \
         }                                                              \
                                                                        \
         C *dest = (C *)exec->vtx.attrptr[A];                           \
         if (N>0) dest[0] = V0;                                         \
         if (N>1) dest[1] = V1;                                         \
         if (N>2) dest[2] = V2;                                         \
         if (N>3) dest[3] = V3;                                         \
         assert(exec->vtx.attr[A].type == T);                           \
                                                                        \
         /* we now have accumulated a per-vertex attribute */           \
         ctx->Driver.NeedFlush |= FLUSH_UPDATE_CURRENT;                 \
      } else {                                                          \
         /* This is a glVertex call */                                  \
         int size = exec->vtx.attr[0].size;                             \
                                                                        \
         /* Check if attribute size or type is changing. */             \
         if (unlikely(size < N * sz ||                                  \
                      exec->vtx.attr[0].type != T)) {                   \
            vbo_exec_wrap_upgrade_vertex(exec, 0, N * sz, T);           \
         }                                                              \
                                                                        \
         uint32_t *dst = (uint32_t *)exec->vtx.buffer_ptr;              \
         uint32_t *src = (uint32_t *)exec->vtx.vertex;                  \
         unsigned vertex_size_no_pos = exec->vtx.vertex_size_no_pos;    \
                                                                        \
         /* Copy over attributes from exec. */                          \
         for (unsigned i = 0; i < vertex_size_no_pos; i++)              \
            *dst++ = *src++;                                            \
                                                                        \
         /* Store the position, which is always last and can have 32 or */ \
         /* 64 bits per channel. */                                     \
         if (sizeof(C) == 4) {                                          \
            if (N > 0) *dst++ = V0;                                     \
            if (N > 1) *dst++ = V1;                                     \
            if (N > 2) *dst++ = V2;                                     \
            if (N > 3) *dst++ = V3;                                     \
                                                                        \
            if (unlikely(N < size)) {                                   \
               if (N < 2 && size >= 2) *dst++ = V1;                     \
               if (N < 3 && size >= 3) *dst++ = V2;                     \
               if (N < 4 && size >= 4) *dst++ = V3;                     \
            }                                                           \
         } else {                                                       \
            /* 64 bits: dst can be unaligned, so copy each 4-byte word */ \
            /* separately */                                            \
            if (N > 0) SET_64BIT(dst, V0);                              \
            if (N > 1) SET_64BIT(dst, V1);                              \
            if (N > 2) SET_64BIT(dst, V2);                              \
            if (N > 3) SET_64BIT(dst, V3);                              \
                                                                        \
            if (unlikely(N * 2 < size)) {                               \
               if (N < 2 && size >= 4) SET_64BIT(dst, V1);              \
               if (N < 3 && size >= 6) SET_64BIT(dst, V2);              \
               if (N < 4 && size >= 8) SET_64BIT(dst, V3);              \
            }                                                           \
         }                                                              \
                                                                        \
         /* dst now points at the beginning of the next vertex */       \
         exec->vtx.buffer_ptr = (fi_type*)dst;                          \
                                                                        \
         /* Don't set FLUSH_UPDATE_CURRENT because */                   \
         /* Current.Attrib[VBO_ATTRIB_POS] is never used. */            \
                                                                        \
         if (unlikely(++exec->vtx.vert_count >= exec->vtx.max_vert))    \
            vbo_exec_vtx_wrap(exec);                                    \
      }                                                                 \
   } while (0)
551
552
553 #undef ERROR
554 #define ERROR(err) _mesa_error(ctx, err, __func__)
555 #define TAG(x) vbo_exec_##x
556
557 #include "vbo_attrib_tmp.h"
558
559
560
/**
 * Execute a glMaterial call.  Note that if GL_COLOR_MATERIAL is enabled,
 * this may be a (partial) no-op.
 *
 * \param face   GL_FRONT, GL_BACK or GL_FRONT_AND_BACK
 * \param pname  which material attribute to set
 * \param params the new value(s)
 */
static void GLAPIENTRY
vbo_exec_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   GLbitfield updateMats;
   GET_CURRENT_CONTEXT(ctx);

   /* This function should be a no-op when it tries to update material
    * attributes which are currently tracking glColor via glColorMaterial.
    * The updateMats var will be a mask of the MAT_BIT_FRONT/BACK_x bits
    * indicating which material attributes can actually be updated below.
    */
   if (ctx->Light.ColorMaterialEnabled) {
      updateMats = ~ctx->Light._ColorMaterialBitmask;
   }
   else {
      /* GL_COLOR_MATERIAL is disabled so don't skip any material updates */
      updateMats = ALL_MATERIAL_BITS;
   }

   /* Restrict the mask to the requested face(s); single-face updates are
    * only legal in the compatibility profile.
    */
   if (ctx->API == API_OPENGL_COMPAT && face == GL_FRONT) {
      updateMats &= FRONT_MATERIAL_BITS;
   }
   else if (ctx->API == API_OPENGL_COMPAT && face == GL_BACK) {
      updateMats &= BACK_MATERIAL_BITS;
   }
   else if (face != GL_FRONT_AND_BACK) {
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterial(invalid face)");
      return;
   }

   switch (pname) {
   case GL_EMISSION:
      if (updateMats & MAT_BIT_FRONT_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_EMISSION, 4, params);
      if (updateMats & MAT_BIT_BACK_EMISSION)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_EMISSION, 4, params);
      break;
   case GL_AMBIENT:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      break;
   case GL_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   case GL_SPECULAR:
      if (updateMats & MAT_BIT_FRONT_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SPECULAR, 4, params);
      if (updateMats & MAT_BIT_BACK_SPECULAR)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SPECULAR, 4, params);
      break;
   case GL_SHININESS:
      /* Shininess is the only range-checked material value. */
      if (*params < 0 || *params > ctx->Const.MaxShininess) {
         _mesa_error(ctx, GL_INVALID_VALUE,
                     "glMaterial(invalid shininess: %f out range [0, %f])",
                     *params, ctx->Const.MaxShininess);
         return;
      }
      if (updateMats & MAT_BIT_FRONT_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_SHININESS, 1, params);
      if (updateMats & MAT_BIT_BACK_SHININESS)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_SHININESS, 1, params);
      break;
   case GL_COLOR_INDEXES:
      /* Color-index materials only exist in the compatibility profile. */
      if (ctx->API != API_OPENGL_COMPAT) {
         _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
         return;
      }
      if (updateMats & MAT_BIT_FRONT_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_INDEXES, 3, params);
      if (updateMats & MAT_BIT_BACK_INDEXES)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_INDEXES, 3, params);
      break;
   case GL_AMBIENT_AND_DIFFUSE:
      if (updateMats & MAT_BIT_FRONT_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_FRONT_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_FRONT_DIFFUSE, 4, params);
      if (updateMats & MAT_BIT_BACK_AMBIENT)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_AMBIENT, 4, params);
      if (updateMats & MAT_BIT_BACK_DIFFUSE)
         MAT_ATTR(VBO_ATTRIB_MAT_BACK_DIFFUSE, 4, params);
      break;
   default:
      _mesa_error(ctx, GL_INVALID_ENUM, "glMaterialfv(pname)");
      return;
   }
}
657
658
659 /**
660 * Flush (draw) vertices.
661 *
662 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
663 */
664 static void
665 vbo_exec_FlushVertices_internal(struct vbo_exec_context *exec, unsigned flags)
666 {
667 struct gl_context *ctx = exec->ctx;
668
669 if (flags & FLUSH_STORED_VERTICES) {
670 if (exec->vtx.vert_count) {
671 vbo_exec_vtx_flush(exec);
672 }
673
674 if (exec->vtx.vertex_size) {
675 vbo_exec_copy_to_current(exec);
676 vbo_reset_all_attr(exec);
677 }
678
679 /* All done. */
680 ctx->Driver.NeedFlush = 0;
681 } else {
682 assert(flags == FLUSH_UPDATE_CURRENT);
683
684 /* Note that the vertex size is unchanged.
685 * (vbo_reset_all_attr isn't called)
686 */
687 vbo_exec_copy_to_current(exec);
688
689 /* Only FLUSH_UPDATE_CURRENT is done. */
690 ctx->Driver.NeedFlush = ~FLUSH_UPDATE_CURRENT;
691 }
692 }
693
694
/**
 * Called via glEvalCoord1f.  Evaluates the enabled 1D maps at u and
 * emits a vertex.  The current vertex is saved and restored around the
 * evaluation so glEvalCoord doesn't clobber user-set attributes.
 */
static void GLAPIENTRY
vbo_exec_EvalCoord1f(GLfloat u)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure each mapped attribute has the size the map produces. */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map1[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map1[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map1[i].sz, GL_FLOAT);
      }
   }

   /* Save the current vertex (copied.buffer is used as scratch here). */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord1f(exec, u);

   /* Restore the pre-evaluation vertex state. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
721
722
/**
 * Called via glEvalCoord2f.  Evaluates the enabled 2D maps at (u, v)
 * and emits a vertex, preserving the current vertex state around the
 * evaluation (same scratch-buffer trick as EvalCoord1f).
 */
static void GLAPIENTRY
vbo_exec_EvalCoord2f(GLfloat u, GLfloat v)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   {
      GLint i;
      if (exec->eval.recalculate_maps)
         vbo_exec_eval_update(exec);

      /* Make sure each mapped attribute has the size the map produces. */
      for (i = 0; i <= VBO_ATTRIB_TEX7; i++) {
         if (exec->eval.map2[i].map)
            if (exec->vtx.attr[i].active_size != exec->eval.map2[i].sz)
               vbo_exec_fixup_vertex(ctx, i, exec->eval.map2[i].sz, GL_FLOAT);
      }

      /* GL_AUTO_NORMAL generates a 3-component normal. */
      if (ctx->Eval.AutoNormal)
         if (exec->vtx.attr[VBO_ATTRIB_NORMAL].active_size != 3)
            vbo_exec_fixup_vertex(ctx, VBO_ATTRIB_NORMAL, 3, GL_FLOAT);
   }

   /* Save the current vertex (copied.buffer is used as scratch here). */
   memcpy(exec->vtx.copied.buffer, exec->vtx.vertex,
          exec->vtx.vertex_size * sizeof(GLfloat));

   vbo_exec_do_EvalCoord2f(exec, u, v);

   /* Restore the pre-evaluation vertex state. */
   memcpy(exec->vtx.vertex, exec->vtx.copied.buffer,
          exec->vtx.vertex_size * sizeof(GLfloat));
}
753
754
755 static void GLAPIENTRY
756 vbo_exec_EvalCoord1fv(const GLfloat *u)
757 {
758 vbo_exec_EvalCoord1f(u[0]);
759 }
760
761
762 static void GLAPIENTRY
763 vbo_exec_EvalCoord2fv(const GLfloat *u)
764 {
765 vbo_exec_EvalCoord2f(u[0], u[1]);
766 }
767
768
769 static void GLAPIENTRY
770 vbo_exec_EvalPoint1(GLint i)
771 {
772 GET_CURRENT_CONTEXT(ctx);
773 GLfloat du = ((ctx->Eval.MapGrid1u2 - ctx->Eval.MapGrid1u1) /
774 (GLfloat) ctx->Eval.MapGrid1un);
775 GLfloat u = i * du + ctx->Eval.MapGrid1u1;
776
777 vbo_exec_EvalCoord1f(u);
778 }
779
780
781 static void GLAPIENTRY
782 vbo_exec_EvalPoint2(GLint i, GLint j)
783 {
784 GET_CURRENT_CONTEXT(ctx);
785 GLfloat du = ((ctx->Eval.MapGrid2u2 - ctx->Eval.MapGrid2u1) /
786 (GLfloat) ctx->Eval.MapGrid2un);
787 GLfloat dv = ((ctx->Eval.MapGrid2v2 - ctx->Eval.MapGrid2v1) /
788 (GLfloat) ctx->Eval.MapGrid2vn);
789 GLfloat u = i * du + ctx->Eval.MapGrid2u1;
790 GLfloat v = j * dv + ctx->Eval.MapGrid2v1;
791
792 vbo_exec_EvalCoord2f(u, v);
793 }
794
795
/**
 * Called via glBegin.
 * Validates the primitive mode, opens a new _mesa_prim record and
 * switches the dispatch tables to the begin/end variants.
 */
static void GLAPIENTRY
vbo_exec_Begin(GLenum mode)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_context *vbo = vbo_context(ctx);
   struct vbo_exec_context *exec = &vbo->exec;
   int i;

   /* Nested glBegin is an error. */
   if (_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glBegin");
      return;
   }

   if (!_mesa_valid_prim_mode(ctx, mode, "glBegin")) {
      return;
   }

   if (!_mesa_valid_to_render(ctx, "glBegin")) {
      return;
   }

   /* Heuristic: attempt to isolate attributes occurring outside
    * begin/end pairs.
    *
    * Use FLUSH_STORED_VERTICES, because it updates current attribs and
    * sets vertex_size to 0.  (FLUSH_UPDATE_CURRENT doesn't change vertex_size)
    */
   if (exec->vtx.vertex_size && !exec->vtx.attr[VBO_ATTRIB_POS].size)
      vbo_exec_FlushVertices_internal(exec, FLUSH_STORED_VERTICES);

   /* Open a new primitive record. */
   i = exec->vtx.prim_count++;
   exec->vtx.prim[i].mode = mode;
   exec->vtx.prim[i].begin = 1;
   exec->vtx.prim[i].end = 0;
   exec->vtx.prim[i].start = exec->vtx.vert_count;
   exec->vtx.prim[i].count = 0;

   ctx->Driver.CurrentExecPrimitive = mode;

   ctx->Exec = ctx->BeginEnd;

   /* We may have been called from a display list, in which case we should
    * leave dlist.c's dispatch table in place.
    */
   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->OutsideBeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   } else {
      assert(ctx->CurrentClientDispatch == ctx->Save);
   }
}
852
853
854 /**
855 * Try to merge / concatenate the two most recent VBO primitives.
856 */
857 static void
858 try_vbo_merge(struct vbo_exec_context *exec)
859 {
860 struct _mesa_prim *cur = &exec->vtx.prim[exec->vtx.prim_count - 1];
861
862 assert(exec->vtx.prim_count >= 1);
863
864 vbo_try_prim_conversion(cur);
865
866 if (exec->vtx.prim_count >= 2) {
867 struct _mesa_prim *prev = &exec->vtx.prim[exec->vtx.prim_count - 2];
868 assert(prev == cur - 1);
869
870 if (vbo_merge_draws(exec->ctx, false, prev, cur))
871 exec->vtx.prim_count--; /* drop the last primitive */
872 }
873 }
874
875
/**
 * Called via glEnd.
 * Restores the outside-begin/end dispatch tables, closes the current
 * primitive (with special-casing for GL_LINE_LOOP) and attempts to
 * merge it with the previous one.
 */
static void GLAPIENTRY
vbo_exec_End(void)
{
   GET_CURRENT_CONTEXT(ctx);
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

   if (!_mesa_inside_begin_end(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glEnd");
      return;
   }

   ctx->Exec = ctx->OutsideBeginEnd;

   if (ctx->CurrentClientDispatch == ctx->MarshalExec) {
      ctx->CurrentServerDispatch = ctx->Exec;
   } else if (ctx->CurrentClientDispatch == ctx->BeginEnd) {
      ctx->CurrentClientDispatch = ctx->Exec;
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   }

   if (exec->vtx.prim_count > 0) {
      /* close off current primitive */
      struct _mesa_prim *last_prim = &exec->vtx.prim[exec->vtx.prim_count - 1];
      unsigned count = exec->vtx.vert_count - last_prim->start;

      last_prim->end = 1;
      last_prim->count = count;

      if (count)
         ctx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;

      /* Special handling for GL_LINE_LOOP */
      if (last_prim->mode == GL_LINE_LOOP && last_prim->begin == 0) {
         /* We're finishing drawing a line loop.  Append 0th vertex onto
          * end of vertex buffer so we can draw it as a line strip.
          */
         const fi_type *src = exec->vtx.buffer_map +
            last_prim->start * exec->vtx.vertex_size;
         fi_type *dst = exec->vtx.buffer_map +
            exec->vtx.vert_count * exec->vtx.vertex_size;

         /* copy 0th vertex to end of buffer */
         memcpy(dst, src, exec->vtx.vertex_size * sizeof(fi_type));

         last_prim->start++;  /* skip vertex0 */
         /* note that last_prim->count stays unchanged */
         last_prim->mode = GL_LINE_STRIP;

         /* Increment the vertex count so the next primitive doesn't
          * overwrite the last vertex which we just added.
          */
         exec->vtx.vert_count++;
         exec->vtx.buffer_ptr += exec->vtx.vertex_size;
      }

      try_vbo_merge(exec);
   }

   ctx->Driver.CurrentExecPrimitive = PRIM_OUTSIDE_BEGIN_END;

   /* Flush now if the primitive array is full. */
   if (exec->vtx.prim_count == VBO_MAX_PRIM)
      vbo_exec_vtx_flush(exec);

   if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
      _mesa_flush(ctx);
   }
}
946
947
948 /**
949 * Called via glPrimitiveRestartNV()
950 */
951 static void GLAPIENTRY
952 vbo_exec_PrimitiveRestartNV(void)
953 {
954 GLenum curPrim;
955 GET_CURRENT_CONTEXT(ctx);
956
957 curPrim = ctx->Driver.CurrentExecPrimitive;
958
959 if (curPrim == PRIM_OUTSIDE_BEGIN_END) {
960 _mesa_error(ctx, GL_INVALID_OPERATION, "glPrimitiveRestartNV");
961 }
962 else {
963 vbo_exec_End();
964 vbo_exec_Begin(curPrim);
965 }
966 }
967
968
/**
 * Populate exec->vtxfmt with the immediate-mode entrypoints.
 * The NAME* macros select which function families vbo_init_tmp.h
 * plugs into the GLvertexformat table.
 */
static void
vbo_exec_vtxfmt_init(struct vbo_exec_context *exec)
{
   struct gl_context *ctx = exec->ctx;
   GLvertexformat *vfmt = &exec->vtxfmt;

#define NAME_AE(x) _ae_##x
#define NAME_CALLLIST(x) _mesa_##x
#define NAME(x) vbo_exec_##x
#define NAME_ES(x) _es_##x

#include "vbo_init_tmp.h"
}
982
983
/**
 * Disable every enabled vertex attribute and zero the vertex size.
 * Note: the loop consumes (clears) exec->vtx.enabled as a side effect
 * via u_bit_scan64.
 */
static void
vbo_reset_all_attr(struct vbo_exec_context *exec)
{
   while (exec->vtx.enabled) {
      const int i = u_bit_scan64(&exec->vtx.enabled);

      /* Reset the vertex attribute by setting its size to zero. */
      exec->vtx.attr[i].size = 0;
      exec->vtx.attr[i].type = GL_FLOAT;
      exec->vtx.attr[i].active_size = 0;
      exec->vtx.attrptr[i] = NULL;
   }

   exec->vtx.vertex_size = 0;
}
999
1000
1001 void
1002 vbo_exec_vtx_init(struct vbo_exec_context *exec, bool use_buffer_objects)
1003 {
1004 struct gl_context *ctx = exec->ctx;
1005
1006 if (use_buffer_objects) {
1007 /* Use buffer objects for immediate mode. */
1008 struct vbo_exec_context *exec = &vbo_context(ctx)->exec;
1009
1010 exec->vtx.bufferobj = ctx->Driver.NewBufferObject(ctx, IMM_BUFFER_NAME);
1011
1012 /* Map the buffer. */
1013 vbo_exec_vtx_map(exec);
1014 assert(exec->vtx.buffer_ptr);
1015 } else {
1016 /* Use allocated memory for immediate mode. */
1017 exec->vtx.bufferobj = NULL;
1018 exec->vtx.buffer_map =
1019 align_malloc(ctx->Const.glBeginEndBufferSize, 64);
1020 exec->vtx.buffer_ptr = exec->vtx.buffer_map;
1021 }
1022
1023 vbo_exec_vtxfmt_init(exec);
1024 _mesa_noop_vtxfmt_init(ctx, &exec->vtxfmt_noop);
1025
1026 exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
1027 vbo_reset_all_attr(exec);
1028 }
1029
1030
/**
 * Tear down the immediate-mode vertex store: free malloc'd storage or
 * unmap/release the backing buffer object.
 */
void
vbo_exec_vtx_destroy(struct vbo_exec_context *exec)
{
   /* using a real VBO for vertex data */
   struct gl_context *ctx = exec->ctx;

   /* True VBOs should already be unmapped
    */
   if (exec->vtx.buffer_map) {
      assert(!exec->vtx.bufferobj ||
             exec->vtx.bufferobj->Name == IMM_BUFFER_NAME);
      if (!exec->vtx.bufferobj) {
         /* malloc'd path: release the CPU-side store. */
         align_free(exec->vtx.buffer_map);
         exec->vtx.buffer_map = NULL;
         exec->vtx.buffer_ptr = NULL;
      }
   }

   /* Free the vertex buffer.  Unmap first if needed.
    */
   if (exec->vtx.bufferobj &&
       _mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
      ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
   }
   /* Drops our reference; frees the buffer object if it was the last one. */
   _mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
}
1057
1058
/**
 * If inside glBegin()/glEnd(), this is a no-op.  Otherwise, if
 * FLUSH_STORED_VERTICES bit in \p flags is set flushes any buffered
 * vertices, if FLUSH_UPDATE_CURRENT bit is set updates
 * __struct gl_contextRec::Current and gl_light_attrib::Material
 *
 * Note that the default T&L engine never clears the
 * FLUSH_UPDATE_CURRENT bit, even after performing the update.
 *
 * \param flags bitmask of FLUSH_STORED_VERTICES, FLUSH_UPDATE_CURRENT
 */
void
vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags)
{
   struct vbo_exec_context *exec = &vbo_context(ctx)->exec;

#ifndef NDEBUG
   /* debug check: make sure we don't get called recursively */
   exec->flush_call_depth++;
   assert(exec->flush_call_depth == 1);
#endif

   if (_mesa_inside_begin_end(ctx)) {
      /* We've had glBegin but not glEnd! */
#ifndef NDEBUG
      exec->flush_call_depth--;
      assert(exec->flush_call_depth == 0);
#endif
      return;
   }

   /* Flush (draw). */
   vbo_exec_FlushVertices_internal(exec, flags);

#ifndef NDEBUG
   exec->flush_call_depth--;
   assert(exec->flush_call_depth == 0);
#endif
}
1098
1099
/** GLES1 glColor4f entrypoint: forwards to the vbo implementation. */
void GLAPIENTRY
_es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a)
{
   vbo_exec_Color4f(r, g, b, a);
}
1105
1106
/** GLES1 glNormal3f entrypoint: forwards to the vbo implementation. */
void GLAPIENTRY
_es_Normal3f(GLfloat x, GLfloat y, GLfloat z)
{
   vbo_exec_Normal3f(x, y, z);
}
1112
1113
/** GLES1 glMultiTexCoord4f entrypoint: forwards to the vbo implementation. */
void GLAPIENTRY
_es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
{
   vbo_exec_MultiTexCoord4f(target, s, t, r, q);
}
1119
1120
/** GLES1 glMaterialfv entrypoint: forwards to the vbo implementation. */
void GLAPIENTRY
_es_Materialfv(GLenum face, GLenum pname, const GLfloat *params)
{
   vbo_exec_Materialfv(face, pname, params);
}
1126
1127
1128 void GLAPIENTRY
1129 _es_Materialf(GLenum face, GLenum pname, GLfloat param)
1130 {
1131 GLfloat p[4];
1132 p[0] = param;
1133 p[1] = p[2] = p[3] = 0.0F;
1134 vbo_exec_Materialfv(face, pname, p);
1135 }
1136
1137
1138 /**
1139 * A special version of glVertexAttrib4f that does not treat index 0 as
1140 * VBO_ATTRIB_POS.
1141 */
1142 static void
1143 VertexAttrib4f_nopos(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
1144 {
1145 GET_CURRENT_CONTEXT(ctx);
1146 if (index < MAX_VERTEX_GENERIC_ATTRIBS)
1147 ATTRF(VBO_ATTRIB_GENERIC0 + index, 4, x, y, z, w);
1148 else
1149 ERROR(GL_INVALID_VALUE);
1150 }
1151
/** GLES glVertexAttrib4f entrypoint (index 0 does not alias position). */
void GLAPIENTRY
_es_VertexAttrib4f(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
{
   VertexAttrib4f_nopos(index, x, y, z, w);
}
1157
1158
/** GLES glVertexAttrib1f: missing components default to (0, 0, 1). */
void GLAPIENTRY
_es_VertexAttrib1f(GLuint indx, GLfloat x)
{
   VertexAttrib4f_nopos(indx, x, 0.0f, 0.0f, 1.0f);
}
1164
1165
/** GLES glVertexAttrib1fv: missing components default to (0, 0, 1). */
void GLAPIENTRY
_es_VertexAttrib1fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], 0.0f, 0.0f, 1.0f);
}
1171
1172
/** GLES glVertexAttrib2f: missing components default to (0, 1). */
void GLAPIENTRY
_es_VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y)
{
   VertexAttrib4f_nopos(indx, x, y, 0.0f, 1.0f);
}
1178
1179
/** GLES glVertexAttrib2fv: missing components default to (0, 1). */
void GLAPIENTRY
_es_VertexAttrib2fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], 0.0f, 1.0f);
}
1185
1186
/** GLES glVertexAttrib3f: missing w component defaults to 1. */
void GLAPIENTRY
_es_VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z)
{
   VertexAttrib4f_nopos(indx, x, y, z, 1.0f);
}
1192
1193
/** GLES glVertexAttrib3fv: missing w component defaults to 1. */
void GLAPIENTRY
_es_VertexAttrib3fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], 1.0f);
}
1199
1200
/** GLES glVertexAttrib4fv entrypoint (index 0 does not alias position). */
void GLAPIENTRY
_es_VertexAttrib4fv(GLuint indx, const GLfloat* values)
{
   VertexAttrib4f_nopos(indx, values[0], values[1], values[2], values[3]);
}