glx: Undo memory allocation checking damage.
[mesa.git] / src / glx / indirect_vertex_array.c
1 /*
2 * (C) Copyright IBM Corporation 2004, 2005
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * IBM,
20 * AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
22 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 */
25
26 #include <inttypes.h>
27 #include <assert.h>
28 #include <string.h>
29
30 #include "glxclient.h"
31 #include "indirect.h"
32 #include <GL/glxproto.h>
33 #include "glxextensions.h"
34 #include "indirect_vertex_array.h"
35 #include "indirect_vertex_array_priv.h"
36
37 #define __GLX_PAD(n) (((n)+3) & ~3)
38
/**
 * \file indirect_vertex_array.c
 * Implement GLX protocol for vertex arrays and vertex buffer objects.
 *
 * The most important function in this file is \c fill_array_info_cache.
 * The \c array_state_vector contains a cache of the ARRAY_INFO data sent
 * in the DrawArrays protocol.  Certain operations, such as enabling or
 * disabling an array, can invalidate this cache.  \c fill_array_info_cache
 * fills-in this data.  Additionally, it examines the enabled state and
 * other factors to determine what "version" of DrawArrays protocol can be
 * used.
 *
 * Currently, only two versions of DrawArrays protocol are implemented.  The
 * first version is the "none" protocol.  This is the fallback when the
 * server does not support GL 1.1 / EXT_vertex_arrays.  It is implemented
 * by sending batches of immediate mode commands that are equivalent to the
 * DrawArrays protocol.
 *
 * The other protocol that is currently implemented is the "old" protocol.
 * This is the GL 1.1 DrawArrays protocol.  The only difference between GL
 * 1.1 and EXT_vertex_arrays is the opcode used for the DrawArrays command.
 * This protocol is called "old" because the ARB is in the process of
 * defining a new protocol, which will probably be called either "new" or
 * "vbo", to support multiple texture coordinate arrays, generic attributes,
 * and vertex buffer objects.
 *
 * \author Ian Romanick <ian.d.romanick@intel.com>
 */
67
68 static void emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count);
69 static void emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count);
70
71 static void emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
72 const GLvoid * indices);
73 static void emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
74 const GLvoid * indices);
75
76
77 static GLubyte *emit_element_none(GLubyte * dst,
78 const struct array_state_vector *arrays,
79 unsigned index);
80 static GLubyte *emit_element_old(GLubyte * dst,
81 const struct array_state_vector *arrays,
82 unsigned index);
83 static struct array_state *get_array_entry(const struct array_state_vector
84 *arrays, GLenum key,
85 unsigned index);
86 static void fill_array_info_cache(struct array_state_vector *arrays);
87 static GLboolean validate_mode(struct glx_context * gc, GLenum mode);
88 static GLboolean validate_count(struct glx_context * gc, GLsizei count);
89 static GLboolean validate_type(struct glx_context * gc, GLenum type);
90
91
/**
 * Table of sizes, in bytes, of the GL types.  All of the type enums are in
 * the range 0x1400 - 0x140F.  That includes types added by extensions
 * (i.e., \c GL_HALF_FLOAT_NV).  The elements of this table correspond to
 * the type enums masked with 0x0f.
 *
 * \notes
 * \c GL_HALF_FLOAT_NV is not included.  Neither are \c GL_2_BYTES,
 * \c GL_3_BYTES, or \c GL_4_BYTES.
 */
const GLuint __glXTypeSize_table[16] = {
   1, 1, 2, 2, 4, 4, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0
};
105
106
107 /**
108 * Free the per-context array state that was allocated with
109 * __glXInitVertexArrayState().
110 */
111 void
112 __glXFreeVertexArrayState(struct glx_context * gc)
113 {
114 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
115 struct array_state_vector *arrays = state->array_state;
116
117 if (arrays) {
118 free(arrays->stack);
119 arrays->stack = NULL;
120 free(arrays->arrays);
121 arrays->arrays = NULL;
122 free(arrays);
123 state->array_state = NULL;
124 }
125 }
126
127
128 /**
129 * Initialize vertex array state of a GLX context.
130 *
131 * \param gc GLX context whose vertex array state is to be initialized.
132 *
133 * \warning
134 * This function may only be called after struct glx_context::gl_extension_bits,
135 * struct glx_context::server_minor, and __GLXcontext::server_major have been
136 * initialized. These values are used to determine what vertex arrays are
137 * supported.
138 */
139 void
140 __glXInitVertexArrayState(struct glx_context * gc)
141 {
142 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
143 struct array_state_vector *arrays;
144
145 unsigned array_count;
146 int texture_units = 1, vertex_program_attribs = 0;
147 unsigned i, j;
148
149 GLboolean got_fog = GL_FALSE;
150 GLboolean got_secondary_color = GL_FALSE;
151
152
153 arrays = calloc(1, sizeof(struct array_state_vector));
154 state->array_state = arrays;
155
156 if (arrays == NULL) {
157 __glXSetError(gc, GL_OUT_OF_MEMORY);
158 return;
159 }
160
161 arrays->old_DrawArrays_possible = !state->NoDrawArraysProtocol;
162 arrays->new_DrawArrays_possible = GL_FALSE;
163 arrays->DrawArrays = NULL;
164
165 arrays->active_texture_unit = 0;
166
167
168 /* Determine how many arrays are actually needed. Only arrays that
169 * are supported by the server are create. For example, if the server
170 * supports only 2 texture units, then only 2 texture coordinate arrays
171 * are created.
172 *
173 * At the very least, GL_VERTEX_ARRAY, GL_NORMAL_ARRAY,
174 * GL_COLOR_ARRAY, GL_INDEX_ARRAY, GL_TEXTURE_COORD_ARRAY, and
175 * GL_EDGE_FLAG_ARRAY are supported.
176 */
177
178 array_count = 5;
179
180 if (__glExtensionBitIsEnabled(gc, GL_EXT_fog_coord_bit)
181 || (gc->server_major > 1) || (gc->server_minor >= 4)) {
182 got_fog = GL_TRUE;
183 array_count++;
184 }
185
186 if (__glExtensionBitIsEnabled(gc, GL_EXT_secondary_color_bit)
187 || (gc->server_major > 1) || (gc->server_minor >= 4)) {
188 got_secondary_color = GL_TRUE;
189 array_count++;
190 }
191
192 if (__glExtensionBitIsEnabled(gc, GL_ARB_multitexture_bit)
193 || (gc->server_major > 1) || (gc->server_minor >= 3)) {
194 __indirect_glGetIntegerv(GL_MAX_TEXTURE_UNITS, &texture_units);
195 }
196
197 if (__glExtensionBitIsEnabled(gc, GL_ARB_vertex_program_bit)) {
198 __indirect_glGetProgramivARB(GL_VERTEX_PROGRAM_ARB,
199 GL_MAX_PROGRAM_ATTRIBS_ARB,
200 &vertex_program_attribs);
201 }
202
203 arrays->num_texture_units = texture_units;
204 arrays->num_vertex_program_attribs = vertex_program_attribs;
205 array_count += texture_units + vertex_program_attribs;
206 arrays->num_arrays = array_count;
207 arrays->arrays = calloc(array_count, sizeof(struct array_state));
208
209 if (arrays->arrays == NULL) {
210 free(arrays);
211 __glXSetError(gc, GL_OUT_OF_MEMORY);
212 return;
213 }
214
215 arrays->arrays[0].data_type = GL_FLOAT;
216 arrays->arrays[0].count = 3;
217 arrays->arrays[0].key = GL_NORMAL_ARRAY;
218 arrays->arrays[0].normalized = GL_TRUE;
219 arrays->arrays[0].old_DrawArrays_possible = GL_TRUE;
220
221 arrays->arrays[1].data_type = GL_FLOAT;
222 arrays->arrays[1].count = 4;
223 arrays->arrays[1].key = GL_COLOR_ARRAY;
224 arrays->arrays[1].normalized = GL_TRUE;
225 arrays->arrays[1].old_DrawArrays_possible = GL_TRUE;
226
227 arrays->arrays[2].data_type = GL_FLOAT;
228 arrays->arrays[2].count = 1;
229 arrays->arrays[2].key = GL_INDEX_ARRAY;
230 arrays->arrays[2].old_DrawArrays_possible = GL_TRUE;
231
232 arrays->arrays[3].data_type = GL_UNSIGNED_BYTE;
233 arrays->arrays[3].count = 1;
234 arrays->arrays[3].key = GL_EDGE_FLAG_ARRAY;
235 arrays->arrays[3].old_DrawArrays_possible = GL_TRUE;
236
237 for (i = 0; i < texture_units; i++) {
238 arrays->arrays[4 + i].data_type = GL_FLOAT;
239 arrays->arrays[4 + i].count = 4;
240 arrays->arrays[4 + i].key = GL_TEXTURE_COORD_ARRAY;
241
242 arrays->arrays[4 + i].old_DrawArrays_possible = (i == 0);
243 arrays->arrays[4 + i].index = i;
244 }
245
246 i = 4 + texture_units;
247
248 if (got_fog) {
249 arrays->arrays[i].data_type = GL_FLOAT;
250 arrays->arrays[i].count = 1;
251 arrays->arrays[i].key = GL_FOG_COORDINATE_ARRAY;
252 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
253 i++;
254 }
255
256 if (got_secondary_color) {
257 arrays->arrays[i].data_type = GL_FLOAT;
258 arrays->arrays[i].count = 3;
259 arrays->arrays[i].key = GL_SECONDARY_COLOR_ARRAY;
260 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
261 arrays->arrays[i].normalized = GL_TRUE;
262 i++;
263 }
264
265
266 for (j = 0; j < vertex_program_attribs; j++) {
267 const unsigned idx = (vertex_program_attribs - (j + 1));
268
269
270 arrays->arrays[idx + i].data_type = GL_FLOAT;
271 arrays->arrays[idx + i].count = 4;
272 arrays->arrays[idx + i].key = GL_VERTEX_ATTRIB_ARRAY_POINTER;
273
274 arrays->arrays[idx + i].old_DrawArrays_possible = 0;
275 arrays->arrays[idx + i].index = idx;
276 }
277
278 i += vertex_program_attribs;
279
280
281 /* Vertex array *must* be last because of the way that
282 * emit_DrawArrays_none works.
283 */
284
285 arrays->arrays[i].data_type = GL_FLOAT;
286 arrays->arrays[i].count = 4;
287 arrays->arrays[i].key = GL_VERTEX_ARRAY;
288 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
289
290 assert((i + 1) == arrays->num_arrays);
291
292 arrays->stack_index = 0;
293 arrays->stack = malloc(sizeof(struct array_stack_state)
294 * arrays->num_arrays
295 * __GL_CLIENT_ATTRIB_STACK_DEPTH);
296
297 if (arrays->stack == NULL) {
298 free(arrays->arrays);
299 free(arrays);
300 __glXSetError(gc, GL_OUT_OF_MEMORY);
301 return;
302 }
303 }
304
305
306 /**
307 * Calculate the size of a single vertex for the "none" protocol. This is
308 * essentially the size of all the immediate-mode commands required to
309 * implement the enabled vertex arrays.
310 */
311 static size_t
312 calculate_single_vertex_size_none(const struct array_state_vector *arrays)
313 {
314 size_t single_vertex_size = 0;
315 unsigned i;
316
317
318 for (i = 0; i < arrays->num_arrays; i++) {
319 if (arrays->arrays[i].enabled) {
320 single_vertex_size += arrays->arrays[i].header[0];
321 }
322 }
323
324 return single_vertex_size;
325 }
326
327
/**
 * Emit a single element using non-DrawArrays protocol.
 *
 * For each enabled array, writes the 4-byte immediate-mode command header
 * cached in \c array_state::header, then the element's data padded to a
 * 4-byte boundary.  Multi-texture coordinate arrays and generic vertex
 * attribute arrays additionally need the texture target / attribute index
 * on the wire.
 *
 * \returns A pointer just past the last byte written into \c dst.
 */
GLubyte *
emit_element_none(GLubyte * dst,
                  const struct array_state_vector * arrays, unsigned index)
{
   unsigned i;


   for (i = 0; i < arrays->num_arrays; i++) {
      if (arrays->arrays[i].enabled) {
         const size_t offset = index * arrays->arrays[i].true_stride;

         /* The generic attributes can have more data than is in the
          * elements.  This is because a vertex array can be a 2 element,
          * normalized, unsigned short, but the "closest" immediate mode
          * protocol is for a 4Nus.  Since the sizes are small, the
          * performance impact on modern processors should be negligible.
          */
         (void) memset(dst, 0, arrays->arrays[i].header[0]);

         /* header[0] holds the command length, header[1] the opcode. */
         (void) memcpy(dst, arrays->arrays[i].header, 4);

         dst += 4;

         if (arrays->arrays[i].key == GL_TEXTURE_COORD_ARRAY &&
             arrays->arrays[i].index > 0) {
            /* Multi-texture coordinate arrays require the texture target
             * to be sent.  For doubles it is after the data, for everything
             * else it is before.
             */
            GLenum texture = arrays->arrays[i].index + GL_TEXTURE0;
            if (arrays->arrays[i].data_type == GL_DOUBLE) {
               (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
                             arrays->arrays[i].element_size);
               dst += arrays->arrays[i].element_size;
               (void) memcpy(dst, &texture, 4);
               dst += 4;
            } else {
               (void) memcpy(dst, &texture, 4);
               dst += 4;
               (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
                             arrays->arrays[i].element_size);
               dst += __GLX_PAD(arrays->arrays[i].element_size);
            }
         } else if (arrays->arrays[i].key == GL_VERTEX_ATTRIB_ARRAY_POINTER) {
            /* Vertex attribute data requires the index sent first.
             */
            (void) memcpy(dst, &arrays->arrays[i].index, 4);
            dst += 4;
            (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
                          arrays->arrays[i].element_size);
            dst += __GLX_PAD(arrays->arrays[i].element_size);
         } else {
            /* Plain array: just the element data, padded to 4 bytes. */
            (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
                          arrays->arrays[i].element_size);
            dst += __GLX_PAD(arrays->arrays[i].element_size);
         }
      }
   }

   return dst;
}
392
393
394 /**
395 * Emit a single element using "old" DrawArrays protocol from
396 * EXT_vertex_arrays / OpenGL 1.1.
397 */
398 GLubyte *
399 emit_element_old(GLubyte * dst,
400 const struct array_state_vector * arrays, unsigned index)
401 {
402 unsigned i;
403
404
405 for (i = 0; i < arrays->num_arrays; i++) {
406 if (arrays->arrays[i].enabled) {
407 const size_t offset = index * arrays->arrays[i].true_stride;
408
409 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
410 arrays->arrays[i].element_size);
411
412 dst += __GLX_PAD(arrays->arrays[i].element_size);
413 }
414 }
415
416 return dst;
417 }
418
419
420 struct array_state *
421 get_array_entry(const struct array_state_vector *arrays,
422 GLenum key, unsigned index)
423 {
424 unsigned i;
425
426 for (i = 0; i < arrays->num_arrays; i++) {
427 if ((arrays->arrays[i].key == key)
428 && (arrays->arrays[i].index == index)) {
429 return &arrays->arrays[i];
430 }
431 }
432
433 return NULL;
434 }
435
436
437 static GLboolean
438 allocate_array_info_cache(struct array_state_vector *arrays,
439 size_t required_size)
440 {
441 #define MAX_HEADER_SIZE 20
442 if (arrays->array_info_cache_buffer_size < required_size) {
443 GLubyte *temp = realloc(arrays->array_info_cache_base,
444 required_size + MAX_HEADER_SIZE);
445
446 if (temp == NULL) {
447 return GL_FALSE;
448 }
449
450 arrays->array_info_cache_base = temp;
451 arrays->array_info_cache = temp + MAX_HEADER_SIZE;
452 arrays->array_info_cache_buffer_size = required_size;
453 }
454
455 arrays->array_info_cache_size = required_size;
456 return GL_TRUE;
457 }
458
459
/**
 * Rebuild the ARRAY_INFO cache and select the DrawArrays / DrawElements
 * protocol implementations that match the currently enabled arrays.
 */
void
fill_array_info_cache(struct array_state_vector *arrays)
{
   GLboolean old_DrawArrays_possible;
   unsigned i;


   /* Determine how many arrays are enabled.  The "old" protocol is only
    * usable if every enabled array supports it.
    */

   arrays->enabled_client_array_count = 0;
   old_DrawArrays_possible = arrays->old_DrawArrays_possible;
   for (i = 0; i < arrays->num_arrays; i++) {
      if (arrays->arrays[i].enabled) {
         arrays->enabled_client_array_count++;
         old_DrawArrays_possible &= arrays->arrays[i].old_DrawArrays_possible;
      }
   }

   if (arrays->new_DrawArrays_possible) {
      /* The "new" (VBO-capable) protocol is not implemented.  This branch
       * is unreachable: new_DrawArrays_possible is set to GL_FALSE in
       * __glXInitVertexArrayState and never changed, and the assert
       * documents that fact.
       */
      assert(!arrays->new_DrawArrays_possible);
   }
   else if (old_DrawArrays_possible) {
      /* Each enabled array contributes one 12-byte ARRAY_INFO record:
       * data type, element count, and array key.
       */
      const size_t required_size = arrays->enabled_client_array_count * 12;
      uint32_t *info;


      if (!allocate_array_info_cache(arrays, required_size)) {
         /* Out of memory: leave the cache marked invalid so a later call
          * retries; the previously selected emit functions are kept.
          */
         return;
      }


      info = (uint32_t *) arrays->array_info_cache;
      for (i = 0; i < arrays->num_arrays; i++) {
         if (arrays->arrays[i].enabled) {
            *(info++) = arrays->arrays[i].data_type;
            *(info++) = arrays->arrays[i].count;
            *(info++) = arrays->arrays[i].key;
         }
      }

      arrays->DrawArrays = emit_DrawArrays_old;
      arrays->DrawElements = emit_DrawElements_old;
   }
   else {
      /* Fall back to batches of immediate-mode commands. */
      arrays->DrawArrays = emit_DrawArrays_none;
      arrays->DrawElements = emit_DrawElements_none;
   }

   arrays->array_info_cache_valid = GL_TRUE;
}
513
514
/**
 * Emit a \c glDrawArrays command using the "none" protocol.  That is,
 * emit immediate-mode commands that are equivalent to the requested
 * \c glDrawArrays command.  This is used with servers that don't support
 * the OpenGL 1.1 / EXT_vertex_arrays DrawArrays protocol or in cases where
 * vertex state is enabled that is not compatible with that protocol.
 */
void
emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;

   size_t single_vertex_size;
   GLubyte *pc;
   unsigned i;
   static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
   static const uint16_t end_cmd[2] = { 4, X_GLrop_End };


   single_vertex_size = calculate_single_vertex_size_none(arrays);

   pc = gc->pc;

   /* Begin command: 4 bytes of {length, opcode} followed by the mode. */
   (void) memcpy(pc, begin_cmd, 4);
   *(int *) (pc + 4) = mode;

   pc += 8;

   for (i = 0; i < count; i++) {
      /* Flush whenever the next vertex might not fit in the buffer. */
      if ((pc + single_vertex_size) >= gc->bufEnd) {
         pc = __glXFlushRenderBuffer(gc, pc);
      }

      pc = emit_element_none(pc, arrays, first + i);
   }

   /* Make room for the 4-byte End command if necessary. */
   if ((pc + 4) >= gc->bufEnd) {
      pc = __glXFlushRenderBuffer(gc, pc);
   }

   (void) memcpy(pc, end_cmd, 4);
   pc += 4;

   gc->pc = pc;
   if (gc->pc > gc->limit) {
      (void) __glXFlushRenderBuffer(gc, gc->pc);
   }
}
566
567
/**
 * Emit the header data for the GL 1.1 / EXT_vertex_arrays DrawArrays
 * protocol.
 *
 * \param gc                    GLX context.
 * \param arrays                Array state.
 * \param elements_per_request  Location to store the number of elements that
 *                              can fit in a single Render / RenderLarge
 *                              command.
 * \param total_requests        Total number of requests for a RenderLarge
 *                              command.  If a Render command is used, this
 *                              will be zero.
 * \param mode                  Drawing mode.
 * \param count                 Number of vertices.
 *
 * \returns
 * A pointer to the buffer for array data.
 */
static GLubyte *
emit_DrawArrays_header_old(struct glx_context * gc,
                           struct array_state_vector *arrays,
                           size_t * elements_per_request,
                           unsigned int *total_requests,
                           GLenum mode, GLsizei count)
{
   size_t command_size;
   size_t single_vertex_size;
   const unsigned header_size = 16;
   unsigned i;
   GLubyte *pc;


   /* Determine the size of the whole command.  This includes the header,
    * the ARRAY_INFO data and the array data.  Once this size is calculated,
    * it will be known whether a Render or RenderLarge command is needed.
    */

   single_vertex_size = 0;
   for (i = 0; i < arrays->num_arrays; i++) {
      if (arrays->arrays[i].enabled) {
         single_vertex_size += __GLX_PAD(arrays->arrays[i].element_size);
      }
   }

   command_size = arrays->array_info_cache_size + header_size
      + (single_vertex_size * count);


   /* Write the header for either a Render command or a RenderLarge
    * command.  After the header is written, write the ARRAY_INFO data.
    */

   if (command_size > gc->maxSmallRenderCommandSize) {
      /* maxSize is the maximum amount of data can be stuffed into a single
       * packet.  sz_xGLXRenderReq is added because bufSize is the maximum
       * packet size minus sz_xGLXRenderReq.
       */
      const size_t maxSize = (gc->bufSize + sz_xGLXRenderReq)
         - sz_xGLXRenderLargeReq;
      unsigned vertex_requests;


      /* Calculate the number of data packets that will be required to send
       * the whole command.  To do this, the number of vertices that
       * will fit in a single buffer must be calculated.
       *
       * The important value here is elements_per_request.  This is the
       * number of complete array elements that will fit in a single
       * buffer.  There may be some wasted space at the end of the buffer,
       * but splitting elements across buffer boundaries would be painful.
       */

      elements_per_request[0] = maxSize / single_vertex_size;

      vertex_requests = (count + elements_per_request[0] - 1)
         / elements_per_request[0];

      /* Chunk 1 carries the command header plus ARRAY_INFO; vertex data
       * occupies the remaining vertex_requests chunks.
       */
      *total_requests = vertex_requests + 1;


      __glXFlushRenderBuffer(gc, gc->pc);

      /* A RenderLarge header is 4 bytes larger than a Render header. */
      command_size += 4;

      /* Build the header in the scratch space that
       * allocate_array_info_cache reserves in front of the cache, so the
       * header and ARRAY_INFO can be sent as one contiguous chunk.
       */
      pc = ((GLubyte *) arrays->array_info_cache) - (header_size + 4);
      *(uint32_t *) (pc + 0) = command_size;
      *(uint32_t *) (pc + 4) = X_GLrop_DrawArrays;
      *(uint32_t *) (pc + 8) = count;
      *(uint32_t *) (pc + 12) = arrays->enabled_client_array_count;
      *(uint32_t *) (pc + 16) = mode;

      __glXSendLargeChunk(gc, 1, *total_requests, pc,
                          header_size + 4 + arrays->array_info_cache_size);

      pc = gc->pc;
   }
   else {
      /* Small command: write the 16-bit Render header, the fixed fields,
       * and the ARRAY_INFO data directly into the render buffer.
       */
      if ((gc->pc + command_size) >= gc->bufEnd) {
         (void) __glXFlushRenderBuffer(gc, gc->pc);
      }

      pc = gc->pc;
      *(uint16_t *) (pc + 0) = command_size;
      *(uint16_t *) (pc + 2) = X_GLrop_DrawArrays;
      *(uint32_t *) (pc + 4) = count;
      *(uint32_t *) (pc + 8) = arrays->enabled_client_array_count;
      *(uint32_t *) (pc + 12) = mode;

      pc += header_size;

      (void) memcpy(pc, arrays->array_info_cache,
                    arrays->array_info_cache_size);
      pc += arrays->array_info_cache_size;

      *elements_per_request = count;
      *total_requests = 0;
   }


   return pc;
}
689
690
/**
 * Emit a \c glDrawArrays command using the "old" (GL 1.1 /
 * EXT_vertex_arrays) protocol.  Small commands go directly into the render
 * buffer; large commands are split across RenderLarge chunks.
 */
void
emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;

   GLubyte *pc;
   size_t elements_per_request;
   unsigned total_requests = 0;
   unsigned i;
   size_t total_sent = 0;   /* NOTE(review): accumulated but never read --
                             * appears to be leftover accounting state. */


   pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
                                   &total_requests, mode, count);


   /* Write the arrays.
    */

   if (total_requests == 0) {
      /* The whole command fit in a single Render command. */
      assert(elements_per_request >= count);

      for (i = 0; i < count; i++) {
         pc = emit_element_old(pc, arrays, i + first);
      }

      assert(pc <= gc->bufEnd);

      gc->pc = pc;
      if (gc->pc > gc->limit) {
         (void) __glXFlushRenderBuffer(gc, gc->pc);
      }
   }
   else {
      unsigned req;


      /* Chunk 1 (header + ARRAY_INFO) was already sent by
       * emit_DrawArrays_header_old, so vertex data starts at chunk 2.
       */
      for (req = 2; req <= total_requests; req++) {
         /* The final chunk may carry fewer than elements_per_request. */
         if (count < elements_per_request) {
            elements_per_request = count;
         }

         pc = gc->pc;
         for (i = 0; i < elements_per_request; i++) {
            pc = emit_element_old(pc, arrays, i + first);
         }

         first += elements_per_request;

         total_sent += (size_t) (pc - gc->pc);
         __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);

         count -= elements_per_request;
      }
   }
}
752
753
/**
 * Emit a \c glDrawElements command using the "none" protocol: wrap the
 * indexed vertices in Begin/End and emit each one as immediate-mode
 * commands via emit_element_none.
 */
void
emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
                       const GLvoid * indices)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;
   static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
   static const uint16_t end_cmd[2] = { 4, X_GLrop_End };

   GLubyte *pc;
   size_t single_vertex_size;
   unsigned i;


   single_vertex_size = calculate_single_vertex_size_none(arrays);


   /* Make sure there is room for the Begin command before taking pc. */
   if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
      gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
   }

   pc = gc->pc;

   (void) memcpy(pc, begin_cmd, 4);
   *(int *) (pc + 4) = mode;

   pc += 8;

   for (i = 0; i < count; i++) {
      unsigned index = 0;

      if ((pc + single_vertex_size) >= gc->bufEnd) {
         pc = __glXFlushRenderBuffer(gc, pc);
      }

      /* Widen the i-th index to unsigned; type was validated by the
       * caller, so only these three cases can occur.
       */
      switch (type) {
      case GL_UNSIGNED_INT:
         index = (unsigned) (((GLuint *) indices)[i]);
         break;
      case GL_UNSIGNED_SHORT:
         index = (unsigned) (((GLushort *) indices)[i]);
         break;
      case GL_UNSIGNED_BYTE:
         index = (unsigned) (((GLubyte *) indices)[i]);
         break;
      }
      pc = emit_element_none(pc, arrays, index);
   }

   /* Make room for the 4-byte End command if necessary. */
   if ((pc + 4) >= gc->bufEnd) {
      pc = __glXFlushRenderBuffer(gc, pc);
   }

   (void) memcpy(pc, end_cmd, 4);
   pc += 4;

   gc->pc = pc;
   if (gc->pc > gc->limit) {
      (void) __glXFlushRenderBuffer(gc, gc->pc);
   }
}
817
818
/**
 * Emit a \c glDrawElements command using the "old" DrawArrays protocol:
 * dereference the index list on the client side and send the vertices as
 * DrawArrays data, chunked into RenderLarge requests when necessary.
 */
void
emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
                      const GLvoid * indices)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;

   GLubyte *pc;
   size_t elements_per_request;
   unsigned total_requests = 0;
   unsigned i;
   unsigned req;
   unsigned req_element = 0;   /* index of the first element of this chunk */


   pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
                                   &total_requests, mode, count);


   /* Write the arrays.
    */

   req = 2;                    /* chunk 1 was the header + ARRAY_INFO */
   while (count > 0) {
      /* The final chunk may carry fewer than elements_per_request. */
      if (count < elements_per_request) {
         elements_per_request = count;
      }

      switch (type) {
      case GL_UNSIGNED_INT:{
            const GLuint *ui_ptr = (const GLuint *) indices + req_element;

            for (i = 0; i < elements_per_request; i++) {
               const GLint index = (GLint) * (ui_ptr++);
               pc = emit_element_old(pc, arrays, index);
            }
            break;
         }
      case GL_UNSIGNED_SHORT:{
            const GLushort *us_ptr = (const GLushort *) indices + req_element;

            for (i = 0; i < elements_per_request; i++) {
               const GLint index = (GLint) * (us_ptr++);
               pc = emit_element_old(pc, arrays, index);
            }
            break;
         }
      case GL_UNSIGNED_BYTE:{
            const GLubyte *ub_ptr = (const GLubyte *) indices + req_element;

            for (i = 0; i < elements_per_request; i++) {
               const GLint index = (GLint) * (ub_ptr++);
               pc = emit_element_old(pc, arrays, index);
            }
            break;
         }
      }

      /* In RenderLarge mode, ship this chunk and reset pc; in Render
       * mode (total_requests == 0) the data stays in the buffer.
       */
      if (total_requests != 0) {
         __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
         pc = gc->pc;
         req++;
      }

      count -= elements_per_request;
      req_element += elements_per_request;
   }


   assert((total_requests == 0) || ((req - 1) == total_requests));

   if (total_requests == 0) {
      assert(pc <= gc->bufEnd);

      gc->pc = pc;
      if (gc->pc > gc->limit) {
         (void) __glXFlushRenderBuffer(gc, gc->pc);
      }
   }
}
903
904
905 /**
906 * Validate that the \c mode parameter to \c glDrawArrays, et. al. is valid.
907 * If it is not valid, then an error code is set in the GLX context.
908 *
909 * \returns
910 * \c GL_TRUE if the argument is valid, \c GL_FALSE if is not.
911 */
912 static GLboolean
913 validate_mode(struct glx_context * gc, GLenum mode)
914 {
915 switch (mode) {
916 case GL_POINTS:
917 case GL_LINE_STRIP:
918 case GL_LINE_LOOP:
919 case GL_LINES:
920 case GL_TRIANGLE_STRIP:
921 case GL_TRIANGLE_FAN:
922 case GL_TRIANGLES:
923 case GL_QUAD_STRIP:
924 case GL_QUADS:
925 case GL_POLYGON:
926 break;
927 default:
928 __glXSetError(gc, GL_INVALID_ENUM);
929 return GL_FALSE;
930 }
931
932 return GL_TRUE;
933 }
934
935
936 /**
937 * Validate that the \c count parameter to \c glDrawArrays, et. al. is valid.
938 * A value less than zero is invalid and will result in \c GL_INVALID_VALUE
939 * being set. A value of zero will not result in an error being set, but
940 * will result in \c GL_FALSE being returned.
941 *
942 * \returns
943 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
944 */
945 static GLboolean
946 validate_count(struct glx_context * gc, GLsizei count)
947 {
948 if (count < 0) {
949 __glXSetError(gc, GL_INVALID_VALUE);
950 }
951
952 return (count > 0);
953 }
954
955
956 /**
957 * Validate that the \c type parameter to \c glDrawElements, et. al. is
958 * valid. Only \c GL_UNSIGNED_BYTE, \c GL_UNSIGNED_SHORT, and
959 * \c GL_UNSIGNED_INT are valid.
960 *
961 * \returns
962 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
963 */
964 static GLboolean
965 validate_type(struct glx_context * gc, GLenum type)
966 {
967 switch (type) {
968 case GL_UNSIGNED_INT:
969 case GL_UNSIGNED_SHORT:
970 case GL_UNSIGNED_BYTE:
971 return GL_TRUE;
972 default:
973 __glXSetError(gc, GL_INVALID_ENUM);
974 return GL_FALSE;
975 }
976 }
977
978
979 void
980 __indirect_glDrawArrays(GLenum mode, GLint first, GLsizei count)
981 {
982 struct glx_context *gc = __glXGetCurrentContext();
983 const __GLXattribute *state =
984 (const __GLXattribute *) (gc->client_state_private);
985 struct array_state_vector *arrays = state->array_state;
986
987
988 if (validate_mode(gc, mode) && validate_count(gc, count)) {
989 if (!arrays->array_info_cache_valid) {
990 fill_array_info_cache(arrays);
991 }
992
993 arrays->DrawArrays(mode, first, count);
994 }
995 }
996
997
998 void
999 __indirect_glArrayElement(GLint index)
1000 {
1001 struct glx_context *gc = __glXGetCurrentContext();
1002 const __GLXattribute *state =
1003 (const __GLXattribute *) (gc->client_state_private);
1004 struct array_state_vector *arrays = state->array_state;
1005
1006 size_t single_vertex_size;
1007
1008
1009 single_vertex_size = calculate_single_vertex_size_none(arrays);
1010
1011 if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
1012 gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
1013 }
1014
1015 gc->pc = emit_element_none(gc->pc, arrays, index);
1016
1017 if (gc->pc > gc->limit) {
1018 (void) __glXFlushRenderBuffer(gc, gc->pc);
1019 }
1020 }
1021
1022
1023 void
1024 __indirect_glDrawElements(GLenum mode, GLsizei count, GLenum type,
1025 const GLvoid * indices)
1026 {
1027 struct glx_context *gc = __glXGetCurrentContext();
1028 const __GLXattribute *state =
1029 (const __GLXattribute *) (gc->client_state_private);
1030 struct array_state_vector *arrays = state->array_state;
1031
1032
1033 if (validate_mode(gc, mode) && validate_count(gc, count)
1034 && validate_type(gc, type)) {
1035 if (!arrays->array_info_cache_valid) {
1036 fill_array_info_cache(arrays);
1037 }
1038
1039 arrays->DrawElements(mode, count, type, indices);
1040 }
1041 }
1042
1043
1044 void
1045 __indirect_glDrawRangeElements(GLenum mode, GLuint start, GLuint end,
1046 GLsizei count, GLenum type,
1047 const GLvoid * indices)
1048 {
1049 struct glx_context *gc = __glXGetCurrentContext();
1050 const __GLXattribute *state =
1051 (const __GLXattribute *) (gc->client_state_private);
1052 struct array_state_vector *arrays = state->array_state;
1053
1054
1055 if (validate_mode(gc, mode) && validate_count(gc, count)
1056 && validate_type(gc, type)) {
1057 if (end < start) {
1058 __glXSetError(gc, GL_INVALID_VALUE);
1059 return;
1060 }
1061
1062 if (!arrays->array_info_cache_valid) {
1063 fill_array_info_cache(arrays);
1064 }
1065
1066 arrays->DrawElements(mode, count, type, indices);
1067 }
1068 }
1069
1070
1071 void
1072 __indirect_glMultiDrawArrays(GLenum mode, const GLint *first,
1073 const GLsizei *count, GLsizei primcount)
1074 {
1075 struct glx_context *gc = __glXGetCurrentContext();
1076 const __GLXattribute *state =
1077 (const __GLXattribute *) (gc->client_state_private);
1078 struct array_state_vector *arrays = state->array_state;
1079 GLsizei i;
1080
1081
1082 if (validate_mode(gc, mode)) {
1083 if (!arrays->array_info_cache_valid) {
1084 fill_array_info_cache(arrays);
1085 }
1086
1087 for (i = 0; i < primcount; i++) {
1088 if (validate_count(gc, count[i])) {
1089 arrays->DrawArrays(mode, first[i], count[i]);
1090 }
1091 }
1092 }
1093 }
1094
1095
1096 void
1097 __indirect_glMultiDrawElementsEXT(GLenum mode, const GLsizei * count,
1098 GLenum type, const GLvoid * const * indices,
1099 GLsizei primcount)
1100 {
1101 struct glx_context *gc = __glXGetCurrentContext();
1102 const __GLXattribute *state =
1103 (const __GLXattribute *) (gc->client_state_private);
1104 struct array_state_vector *arrays = state->array_state;
1105 GLsizei i;
1106
1107
1108 if (validate_mode(gc, mode) && validate_type(gc, type)) {
1109 if (!arrays->array_info_cache_valid) {
1110 fill_array_info_cache(arrays);
1111 }
1112
1113 for (i = 0; i < primcount; i++) {
1114 if (validate_count(gc, count[i])) {
1115 arrays->DrawElements(mode, count[i], type, indices[i]);
1116 }
1117 }
1118 }
1119 }
1120
1121
/* Initialize the common fields of an array_state entry for one of the
 * gl*Pointer functions.
 *
 * The HDR_SIZE macro argument is the command header size (4 bytes)
 * plus any additional index word e.g. for texture units or vertex
 * attributes.
 *
 * element_size is the byte size of one complete element (COUNT
 * components of TYPE).  true_stride falls back to element_size when
 * the application passes a stride of 0, meaning "tightly packed".
 * header[0]/header[1] form the GLX render-command header: padded
 * command length and render opcode.
 */
#define COMMON_ARRAY_DATA_INIT(a, PTR, TYPE, STRIDE, COUNT, NORMALIZED, HDR_SIZE, OPCODE) \
   do { \
      (a)->data = PTR; \
      (a)->data_type = TYPE; \
      (a)->user_stride = STRIDE; \
      (a)->count = COUNT; \
      (a)->normalized = NORMALIZED; \
      \
      (a)->element_size = __glXTypeSize( TYPE ) * COUNT; \
      (a)->true_stride = (STRIDE == 0) \
         ? (a)->element_size : STRIDE; \
      \
      (a)->header[0] = __GLX_PAD(HDR_SIZE + (a)->element_size); \
      (a)->header[1] = OPCODE; \
   } while(0)
1141
1142
1143 void
1144 __indirect_glVertexPointer(GLint size, GLenum type, GLsizei stride,
1145 const GLvoid * pointer)
1146 {
1147 static const uint16_t short_ops[5] = {
1148 0, 0, X_GLrop_Vertex2sv, X_GLrop_Vertex3sv, X_GLrop_Vertex4sv
1149 };
1150 static const uint16_t int_ops[5] = {
1151 0, 0, X_GLrop_Vertex2iv, X_GLrop_Vertex3iv, X_GLrop_Vertex4iv
1152 };
1153 static const uint16_t float_ops[5] = {
1154 0, 0, X_GLrop_Vertex2fv, X_GLrop_Vertex3fv, X_GLrop_Vertex4fv
1155 };
1156 static const uint16_t double_ops[5] = {
1157 0, 0, X_GLrop_Vertex2dv, X_GLrop_Vertex3dv, X_GLrop_Vertex4dv
1158 };
1159 uint16_t opcode;
1160 struct glx_context *gc = __glXGetCurrentContext();
1161 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1162 struct array_state_vector *arrays = state->array_state;
1163 struct array_state *a;
1164
1165
1166 if (size < 2 || size > 4 || stride < 0) {
1167 __glXSetError(gc, GL_INVALID_VALUE);
1168 return;
1169 }
1170
1171 switch (type) {
1172 case GL_SHORT:
1173 opcode = short_ops[size];
1174 break;
1175 case GL_INT:
1176 opcode = int_ops[size];
1177 break;
1178 case GL_FLOAT:
1179 opcode = float_ops[size];
1180 break;
1181 case GL_DOUBLE:
1182 opcode = double_ops[size];
1183 break;
1184 default:
1185 __glXSetError(gc, GL_INVALID_ENUM);
1186 return;
1187 }
1188
1189 a = get_array_entry(arrays, GL_VERTEX_ARRAY, 0);
1190 assert(a != NULL);
1191 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE, 4,
1192 opcode);
1193
1194 if (a->enabled) {
1195 arrays->array_info_cache_valid = GL_FALSE;
1196 }
1197 }
1198
1199
1200 void
1201 __indirect_glNormalPointer(GLenum type, GLsizei stride,
1202 const GLvoid * pointer)
1203 {
1204 uint16_t opcode;
1205 struct glx_context *gc = __glXGetCurrentContext();
1206 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1207 struct array_state_vector *arrays = state->array_state;
1208 struct array_state *a;
1209
1210
1211 if (stride < 0) {
1212 __glXSetError(gc, GL_INVALID_VALUE);
1213 return;
1214 }
1215
1216 switch (type) {
1217 case GL_BYTE:
1218 opcode = X_GLrop_Normal3bv;
1219 break;
1220 case GL_SHORT:
1221 opcode = X_GLrop_Normal3sv;
1222 break;
1223 case GL_INT:
1224 opcode = X_GLrop_Normal3iv;
1225 break;
1226 case GL_FLOAT:
1227 opcode = X_GLrop_Normal3fv;
1228 break;
1229 case GL_DOUBLE:
1230 opcode = X_GLrop_Normal3dv;
1231 break;
1232 default:
1233 __glXSetError(gc, GL_INVALID_ENUM);
1234 return;
1235 }
1236
1237 a = get_array_entry(arrays, GL_NORMAL_ARRAY, 0);
1238 assert(a != NULL);
1239 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 3, GL_TRUE, 4, opcode);
1240
1241 if (a->enabled) {
1242 arrays->array_info_cache_valid = GL_FALSE;
1243 }
1244 }
1245
1246
1247 void
1248 __indirect_glColorPointer(GLint size, GLenum type, GLsizei stride,
1249 const GLvoid * pointer)
1250 {
1251 static const uint16_t byte_ops[5] = {
1252 0, 0, 0, X_GLrop_Color3bv, X_GLrop_Color4bv
1253 };
1254 static const uint16_t ubyte_ops[5] = {
1255 0, 0, 0, X_GLrop_Color3ubv, X_GLrop_Color4ubv
1256 };
1257 static const uint16_t short_ops[5] = {
1258 0, 0, 0, X_GLrop_Color3sv, X_GLrop_Color4sv
1259 };
1260 static const uint16_t ushort_ops[5] = {
1261 0, 0, 0, X_GLrop_Color3usv, X_GLrop_Color4usv
1262 };
1263 static const uint16_t int_ops[5] = {
1264 0, 0, 0, X_GLrop_Color3iv, X_GLrop_Color4iv
1265 };
1266 static const uint16_t uint_ops[5] = {
1267 0, 0, 0, X_GLrop_Color3uiv, X_GLrop_Color4uiv
1268 };
1269 static const uint16_t float_ops[5] = {
1270 0, 0, 0, X_GLrop_Color3fv, X_GLrop_Color4fv
1271 };
1272 static const uint16_t double_ops[5] = {
1273 0, 0, 0, X_GLrop_Color3dv, X_GLrop_Color4dv
1274 };
1275 uint16_t opcode;
1276 struct glx_context *gc = __glXGetCurrentContext();
1277 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1278 struct array_state_vector *arrays = state->array_state;
1279 struct array_state *a;
1280
1281
1282 if (size < 3 || size > 4 || stride < 0) {
1283 __glXSetError(gc, GL_INVALID_VALUE);
1284 return;
1285 }
1286
1287 switch (type) {
1288 case GL_BYTE:
1289 opcode = byte_ops[size];
1290 break;
1291 case GL_UNSIGNED_BYTE:
1292 opcode = ubyte_ops[size];
1293 break;
1294 case GL_SHORT:
1295 opcode = short_ops[size];
1296 break;
1297 case GL_UNSIGNED_SHORT:
1298 opcode = ushort_ops[size];
1299 break;
1300 case GL_INT:
1301 opcode = int_ops[size];
1302 break;
1303 case GL_UNSIGNED_INT:
1304 opcode = uint_ops[size];
1305 break;
1306 case GL_FLOAT:
1307 opcode = float_ops[size];
1308 break;
1309 case GL_DOUBLE:
1310 opcode = double_ops[size];
1311 break;
1312 default:
1313 __glXSetError(gc, GL_INVALID_ENUM);
1314 return;
1315 }
1316
1317 a = get_array_entry(arrays, GL_COLOR_ARRAY, 0);
1318 assert(a != NULL);
1319 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
1320
1321 if (a->enabled) {
1322 arrays->array_info_cache_valid = GL_FALSE;
1323 }
1324 }
1325
1326
1327 void
1328 __indirect_glIndexPointer(GLenum type, GLsizei stride, const GLvoid * pointer)
1329 {
1330 uint16_t opcode;
1331 struct glx_context *gc = __glXGetCurrentContext();
1332 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1333 struct array_state_vector *arrays = state->array_state;
1334 struct array_state *a;
1335
1336
1337 if (stride < 0) {
1338 __glXSetError(gc, GL_INVALID_VALUE);
1339 return;
1340 }
1341
1342 switch (type) {
1343 case GL_UNSIGNED_BYTE:
1344 opcode = X_GLrop_Indexubv;
1345 break;
1346 case GL_SHORT:
1347 opcode = X_GLrop_Indexsv;
1348 break;
1349 case GL_INT:
1350 opcode = X_GLrop_Indexiv;
1351 break;
1352 case GL_FLOAT:
1353 opcode = X_GLrop_Indexfv;
1354 break;
1355 case GL_DOUBLE:
1356 opcode = X_GLrop_Indexdv;
1357 break;
1358 default:
1359 __glXSetError(gc, GL_INVALID_ENUM);
1360 return;
1361 }
1362
1363 a = get_array_entry(arrays, GL_INDEX_ARRAY, 0);
1364 assert(a != NULL);
1365 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
1366
1367 if (a->enabled) {
1368 arrays->array_info_cache_valid = GL_FALSE;
1369 }
1370 }
1371
1372
1373 void
1374 __indirect_glEdgeFlagPointer(GLsizei stride, const GLvoid * pointer)
1375 {
1376 struct glx_context *gc = __glXGetCurrentContext();
1377 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1378 struct array_state_vector *arrays = state->array_state;
1379 struct array_state *a;
1380
1381
1382 if (stride < 0) {
1383 __glXSetError(gc, GL_INVALID_VALUE);
1384 return;
1385 }
1386
1387
1388 a = get_array_entry(arrays, GL_EDGE_FLAG_ARRAY, 0);
1389 assert(a != NULL);
1390 COMMON_ARRAY_DATA_INIT(a, pointer, GL_UNSIGNED_BYTE, stride, 1, GL_FALSE,
1391 4, X_GLrop_EdgeFlagv);
1392
1393 if (a->enabled) {
1394 arrays->array_info_cache_valid = GL_FALSE;
1395 }
1396 }
1397
1398
1399 void
1400 __indirect_glTexCoordPointer(GLint size, GLenum type, GLsizei stride,
1401 const GLvoid * pointer)
1402 {
1403 static const uint16_t short_ops[5] = {
1404 0, X_GLrop_TexCoord1sv, X_GLrop_TexCoord2sv, X_GLrop_TexCoord3sv,
1405 X_GLrop_TexCoord4sv
1406 };
1407 static const uint16_t int_ops[5] = {
1408 0, X_GLrop_TexCoord1iv, X_GLrop_TexCoord2iv, X_GLrop_TexCoord3iv,
1409 X_GLrop_TexCoord4iv
1410 };
1411 static const uint16_t float_ops[5] = {
1412 0, X_GLrop_TexCoord1fv, X_GLrop_TexCoord2fv, X_GLrop_TexCoord3fv,
1413 X_GLrop_TexCoord4fv
1414 };
1415 static const uint16_t double_ops[5] = {
1416 0, X_GLrop_TexCoord1dv, X_GLrop_TexCoord2dv, X_GLrop_TexCoord3dv,
1417 X_GLrop_TexCoord4dv
1418 };
1419
1420 static const uint16_t mshort_ops[5] = {
1421 0, X_GLrop_MultiTexCoord1svARB, X_GLrop_MultiTexCoord2svARB,
1422 X_GLrop_MultiTexCoord3svARB, X_GLrop_MultiTexCoord4svARB
1423 };
1424 static const uint16_t mint_ops[5] = {
1425 0, X_GLrop_MultiTexCoord1ivARB, X_GLrop_MultiTexCoord2ivARB,
1426 X_GLrop_MultiTexCoord3ivARB, X_GLrop_MultiTexCoord4ivARB
1427 };
1428 static const uint16_t mfloat_ops[5] = {
1429 0, X_GLrop_MultiTexCoord1fvARB, X_GLrop_MultiTexCoord2fvARB,
1430 X_GLrop_MultiTexCoord3fvARB, X_GLrop_MultiTexCoord4fvARB
1431 };
1432 static const uint16_t mdouble_ops[5] = {
1433 0, X_GLrop_MultiTexCoord1dvARB, X_GLrop_MultiTexCoord2dvARB,
1434 X_GLrop_MultiTexCoord3dvARB, X_GLrop_MultiTexCoord4dvARB
1435 };
1436
1437 uint16_t opcode;
1438 struct glx_context *gc = __glXGetCurrentContext();
1439 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1440 struct array_state_vector *arrays = state->array_state;
1441 struct array_state *a;
1442 unsigned header_size;
1443 unsigned index;
1444
1445
1446 if (size < 1 || size > 4 || stride < 0) {
1447 __glXSetError(gc, GL_INVALID_VALUE);
1448 return;
1449 }
1450
1451 index = arrays->active_texture_unit;
1452 if (index == 0) {
1453 switch (type) {
1454 case GL_SHORT:
1455 opcode = short_ops[size];
1456 break;
1457 case GL_INT:
1458 opcode = int_ops[size];
1459 break;
1460 case GL_FLOAT:
1461 opcode = float_ops[size];
1462 break;
1463 case GL_DOUBLE:
1464 opcode = double_ops[size];
1465 break;
1466 default:
1467 __glXSetError(gc, GL_INVALID_ENUM);
1468 return;
1469 }
1470
1471 header_size = 4;
1472 }
1473 else {
1474 switch (type) {
1475 case GL_SHORT:
1476 opcode = mshort_ops[size];
1477 break;
1478 case GL_INT:
1479 opcode = mint_ops[size];
1480 break;
1481 case GL_FLOAT:
1482 opcode = mfloat_ops[size];
1483 break;
1484 case GL_DOUBLE:
1485 opcode = mdouble_ops[size];
1486 break;
1487 default:
1488 __glXSetError(gc, GL_INVALID_ENUM);
1489 return;
1490 }
1491
1492 header_size = 8;
1493 }
1494
1495 a = get_array_entry(arrays, GL_TEXTURE_COORD_ARRAY, index);
1496 assert(a != NULL);
1497 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE,
1498 header_size, opcode);
1499
1500 if (a->enabled) {
1501 arrays->array_info_cache_valid = GL_FALSE;
1502 }
1503 }
1504
1505
1506 void
1507 __indirect_glSecondaryColorPointer(GLint size, GLenum type, GLsizei stride,
1508 const GLvoid * pointer)
1509 {
1510 uint16_t opcode;
1511 struct glx_context *gc = __glXGetCurrentContext();
1512 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1513 struct array_state_vector *arrays = state->array_state;
1514 struct array_state *a;
1515
1516
1517 if (size != 3 || stride < 0) {
1518 __glXSetError(gc, GL_INVALID_VALUE);
1519 return;
1520 }
1521
1522 switch (type) {
1523 case GL_BYTE:
1524 opcode = 4126;
1525 break;
1526 case GL_UNSIGNED_BYTE:
1527 opcode = 4131;
1528 break;
1529 case GL_SHORT:
1530 opcode = 4127;
1531 break;
1532 case GL_UNSIGNED_SHORT:
1533 opcode = 4132;
1534 break;
1535 case GL_INT:
1536 opcode = 4128;
1537 break;
1538 case GL_UNSIGNED_INT:
1539 opcode = 4133;
1540 break;
1541 case GL_FLOAT:
1542 opcode = 4129;
1543 break;
1544 case GL_DOUBLE:
1545 opcode = 4130;
1546 break;
1547 default:
1548 __glXSetError(gc, GL_INVALID_ENUM);
1549 return;
1550 }
1551
1552 a = get_array_entry(arrays, GL_SECONDARY_COLOR_ARRAY, 0);
1553 if (a == NULL) {
1554 __glXSetError(gc, GL_INVALID_OPERATION);
1555 return;
1556 }
1557
1558 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
1559
1560 if (a->enabled) {
1561 arrays->array_info_cache_valid = GL_FALSE;
1562 }
1563 }
1564
1565
1566 void
1567 __indirect_glFogCoordPointer(GLenum type, GLsizei stride,
1568 const GLvoid * pointer)
1569 {
1570 uint16_t opcode;
1571 struct glx_context *gc = __glXGetCurrentContext();
1572 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1573 struct array_state_vector *arrays = state->array_state;
1574 struct array_state *a;
1575
1576
1577 if (stride < 0) {
1578 __glXSetError(gc, GL_INVALID_VALUE);
1579 return;
1580 }
1581
1582 switch (type) {
1583 case GL_FLOAT:
1584 opcode = 4124;
1585 break;
1586 case GL_DOUBLE:
1587 opcode = 4125;
1588 break;
1589 default:
1590 __glXSetError(gc, GL_INVALID_ENUM);
1591 return;
1592 }
1593
1594 a = get_array_entry(arrays, GL_FOG_COORD_ARRAY, 0);
1595 if (a == NULL) {
1596 __glXSetError(gc, GL_INVALID_OPERATION);
1597 return;
1598 }
1599
1600 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
1601
1602 if (a->enabled) {
1603 arrays->array_info_cache_valid = GL_FALSE;
1604 }
1605 }
1606
1607
1608 void
1609 __indirect_glVertexAttribPointer(GLuint index, GLint size,
1610 GLenum type, GLboolean normalized,
1611 GLsizei stride, const GLvoid * pointer)
1612 {
1613 static const uint16_t short_ops[5] = {
1614 0, X_GLrop_VertexAttrib1svARB, X_GLrop_VertexAttrib2svARB,
1615 X_GLrop_VertexAttrib3svARB, X_GLrop_VertexAttrib4svARB
1616 };
1617 static const uint16_t float_ops[5] = {
1618 0, X_GLrop_VertexAttrib1fvARB, X_GLrop_VertexAttrib2fvARB,
1619 X_GLrop_VertexAttrib3fvARB, X_GLrop_VertexAttrib4fvARB
1620 };
1621 static const uint16_t double_ops[5] = {
1622 0, X_GLrop_VertexAttrib1dvARB, X_GLrop_VertexAttrib2dvARB,
1623 X_GLrop_VertexAttrib3dvARB, X_GLrop_VertexAttrib4dvARB
1624 };
1625
1626 uint16_t opcode;
1627 struct glx_context *gc = __glXGetCurrentContext();
1628 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1629 struct array_state_vector *arrays = state->array_state;
1630 struct array_state *a;
1631 unsigned true_immediate_count;
1632 unsigned true_immediate_size;
1633
1634
1635 if ((size < 1) || (size > 4) || (stride < 0)
1636 || (index > arrays->num_vertex_program_attribs)) {
1637 __glXSetError(gc, GL_INVALID_VALUE);
1638 return;
1639 }
1640
1641 if (normalized && (type != GL_FLOAT) && (type != GL_DOUBLE)) {
1642 switch (type) {
1643 case GL_BYTE:
1644 opcode = X_GLrop_VertexAttrib4NbvARB;
1645 break;
1646 case GL_UNSIGNED_BYTE:
1647 opcode = X_GLrop_VertexAttrib4NubvARB;
1648 break;
1649 case GL_SHORT:
1650 opcode = X_GLrop_VertexAttrib4NsvARB;
1651 break;
1652 case GL_UNSIGNED_SHORT:
1653 opcode = X_GLrop_VertexAttrib4NusvARB;
1654 break;
1655 case GL_INT:
1656 opcode = X_GLrop_VertexAttrib4NivARB;
1657 break;
1658 case GL_UNSIGNED_INT:
1659 opcode = X_GLrop_VertexAttrib4NuivARB;
1660 break;
1661 default:
1662 __glXSetError(gc, GL_INVALID_ENUM);
1663 return;
1664 }
1665
1666 true_immediate_count = 4;
1667 }
1668 else {
1669 true_immediate_count = size;
1670
1671 switch (type) {
1672 case GL_BYTE:
1673 opcode = X_GLrop_VertexAttrib4bvARB;
1674 true_immediate_count = 4;
1675 break;
1676 case GL_UNSIGNED_BYTE:
1677 opcode = X_GLrop_VertexAttrib4ubvARB;
1678 true_immediate_count = 4;
1679 break;
1680 case GL_SHORT:
1681 opcode = short_ops[size];
1682 break;
1683 case GL_UNSIGNED_SHORT:
1684 opcode = X_GLrop_VertexAttrib4usvARB;
1685 true_immediate_count = 4;
1686 break;
1687 case GL_INT:
1688 opcode = X_GLrop_VertexAttrib4ivARB;
1689 true_immediate_count = 4;
1690 break;
1691 case GL_UNSIGNED_INT:
1692 opcode = X_GLrop_VertexAttrib4uivARB;
1693 true_immediate_count = 4;
1694 break;
1695 case GL_FLOAT:
1696 opcode = float_ops[size];
1697 break;
1698 case GL_DOUBLE:
1699 opcode = double_ops[size];
1700 break;
1701 default:
1702 __glXSetError(gc, GL_INVALID_ENUM);
1703 return;
1704 }
1705 }
1706
1707 a = get_array_entry(arrays, GL_VERTEX_ATTRIB_ARRAY_POINTER, index);
1708 if (a == NULL) {
1709 __glXSetError(gc, GL_INVALID_OPERATION);
1710 return;
1711 }
1712
1713 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, normalized, 8,
1714 opcode);
1715
1716 true_immediate_size = __glXTypeSize(type) * true_immediate_count;
1717 a->header[0] = __GLX_PAD(8 + true_immediate_size);
1718
1719 if (a->enabled) {
1720 arrays->array_info_cache_valid = GL_FALSE;
1721 }
1722 }
1723
1724
/**
 * I don't have 100% confidence that this is correct.  The different rules
 * about whether or not generic vertex attributes alias "classic" vertex
 * attributes (i.e., attrib1 ?= primary color) between ARB_vertex_program,
 * ARB_vertex_shader, and NV_vertex_program are a bit confusing.  My
 * feeling is that the client-side doesn't have to worry about it.  The
 * client just sends all the data to the server and lets the server deal
 * with it.
 */
void
__indirect_glVertexAttribPointerNV(GLuint index, GLint size,
                                   GLenum type, GLsizei stride,
                                   const GLvoid * pointer)
{
   struct glx_context *gc = __glXGetCurrentContext();
   GLboolean normalized = GL_FALSE;


   switch (type) {
   case GL_UNSIGNED_BYTE:
      /* Unsigned-byte attributes must have exactly 4 components and
       * are always treated as normalized.
       */
      if (size != 4) {
         __glXSetError(gc, GL_INVALID_VALUE);
         return;
      }
      normalized = GL_TRUE;
      /* FALLTHROUGH -- deliberately joins the delegation below. */

   case GL_SHORT:
   case GL_FLOAT:
   case GL_DOUBLE:
      /* Delegate to the ARB entry point, which performs the remaining
       * validation and builds the protocol state.
       */
      __indirect_glVertexAttribPointer(index, size, type,
                                       normalized, stride, pointer);
      return;
   default:
      __glXSetError(gc, GL_INVALID_ENUM);
      return;
   }
}
1762
1763
1764 void
1765 __indirect_glClientActiveTexture(GLenum texture)
1766 {
1767 struct glx_context *const gc = __glXGetCurrentContext();
1768 __GLXattribute *const state =
1769 (__GLXattribute *) (gc->client_state_private);
1770 struct array_state_vector *const arrays = state->array_state;
1771 const GLint unit = (GLint) texture - GL_TEXTURE0;
1772
1773
1774 if ((unit < 0) || (unit >= arrays->num_texture_units)) {
1775 __glXSetError(gc, GL_INVALID_ENUM);
1776 return;
1777 }
1778
1779 arrays->active_texture_unit = unit;
1780 }
1781
1782
1783 /**
1784 * Modify the enable state for the selected array
1785 */
1786 GLboolean
1787 __glXSetArrayEnable(__GLXattribute * state, GLenum key, unsigned index,
1788 GLboolean enable)
1789 {
1790 struct array_state_vector *arrays = state->array_state;
1791 struct array_state *a;
1792
1793
1794 /* Texture coordinate arrays have an implict index set when the
1795 * application calls glClientActiveTexture.
1796 */
1797 if (key == GL_TEXTURE_COORD_ARRAY) {
1798 index = arrays->active_texture_unit;
1799 }
1800
1801 a = get_array_entry(arrays, key, index);
1802
1803 if ((a != NULL) && (a->enabled != enable)) {
1804 a->enabled = enable;
1805 arrays->array_info_cache_valid = GL_FALSE;
1806 }
1807
1808 return (a != NULL);
1809 }
1810
1811
1812 void
1813 __glXArrayDisableAll(__GLXattribute * state)
1814 {
1815 struct array_state_vector *arrays = state->array_state;
1816 unsigned i;
1817
1818
1819 for (i = 0; i < arrays->num_arrays; i++) {
1820 arrays->arrays[i].enabled = GL_FALSE;
1821 }
1822
1823 arrays->array_info_cache_valid = GL_FALSE;
1824 }
1825
1826
1827 /**
1828 */
1829 GLboolean
1830 __glXGetArrayEnable(const __GLXattribute * const state,
1831 GLenum key, unsigned index, GLintptr * dest)
1832 {
1833 const struct array_state_vector *arrays = state->array_state;
1834 const struct array_state *a =
1835 get_array_entry((struct array_state_vector *) arrays,
1836 key, index);
1837
1838 if (a != NULL) {
1839 *dest = (GLintptr) a->enabled;
1840 }
1841
1842 return (a != NULL);
1843 }
1844
1845
1846 /**
1847 */
1848 GLboolean
1849 __glXGetArrayType(const __GLXattribute * const state,
1850 GLenum key, unsigned index, GLintptr * dest)
1851 {
1852 const struct array_state_vector *arrays = state->array_state;
1853 const struct array_state *a =
1854 get_array_entry((struct array_state_vector *) arrays,
1855 key, index);
1856
1857 if (a != NULL) {
1858 *dest = (GLintptr) a->data_type;
1859 }
1860
1861 return (a != NULL);
1862 }
1863
1864
1865 /**
1866 */
1867 GLboolean
1868 __glXGetArraySize(const __GLXattribute * const state,
1869 GLenum key, unsigned index, GLintptr * dest)
1870 {
1871 const struct array_state_vector *arrays = state->array_state;
1872 const struct array_state *a =
1873 get_array_entry((struct array_state_vector *) arrays,
1874 key, index);
1875
1876 if (a != NULL) {
1877 *dest = (GLintptr) a->count;
1878 }
1879
1880 return (a != NULL);
1881 }
1882
1883
1884 /**
1885 */
1886 GLboolean
1887 __glXGetArrayStride(const __GLXattribute * const state,
1888 GLenum key, unsigned index, GLintptr * dest)
1889 {
1890 const struct array_state_vector *arrays = state->array_state;
1891 const struct array_state *a =
1892 get_array_entry((struct array_state_vector *) arrays,
1893 key, index);
1894
1895 if (a != NULL) {
1896 *dest = (GLintptr) a->user_stride;
1897 }
1898
1899 return (a != NULL);
1900 }
1901
1902
1903 /**
1904 */
1905 GLboolean
1906 __glXGetArrayPointer(const __GLXattribute * const state,
1907 GLenum key, unsigned index, void **dest)
1908 {
1909 const struct array_state_vector *arrays = state->array_state;
1910 const struct array_state *a =
1911 get_array_entry((struct array_state_vector *) arrays,
1912 key, index);
1913
1914
1915 if (a != NULL) {
1916 *dest = (void *) (a->data);
1917 }
1918
1919 return (a != NULL);
1920 }
1921
1922
1923 /**
1924 */
1925 GLboolean
1926 __glXGetArrayNormalized(const __GLXattribute * const state,
1927 GLenum key, unsigned index, GLintptr * dest)
1928 {
1929 const struct array_state_vector *arrays = state->array_state;
1930 const struct array_state *a =
1931 get_array_entry((struct array_state_vector *) arrays,
1932 key, index);
1933
1934
1935 if (a != NULL) {
1936 *dest = (GLintptr) a->normalized;
1937 }
1938
1939 return (a != NULL);
1940 }
1941
1942
/**
 * Query the client-side active texture unit (most recently set via
 * __indirect_glClientActiveTexture).
 */
GLuint
__glXGetActiveTextureUnit(const __GLXattribute * const state)
{
   return state->array_state->active_texture_unit;
}
1950
1951
1952 void
1953 __glXPushArrayState(__GLXattribute * state)
1954 {
1955 struct array_state_vector *arrays = state->array_state;
1956 struct array_stack_state *stack =
1957 &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
1958 unsigned i;
1959
1960 /* XXX are we pushing _all_ the necessary fields? */
1961 for (i = 0; i < arrays->num_arrays; i++) {
1962 stack[i].data = arrays->arrays[i].data;
1963 stack[i].data_type = arrays->arrays[i].data_type;
1964 stack[i].user_stride = arrays->arrays[i].user_stride;
1965 stack[i].count = arrays->arrays[i].count;
1966 stack[i].key = arrays->arrays[i].key;
1967 stack[i].index = arrays->arrays[i].index;
1968 stack[i].enabled = arrays->arrays[i].enabled;
1969 }
1970
1971 arrays->active_texture_unit_stack[arrays->stack_index] =
1972 arrays->active_texture_unit;
1973
1974 arrays->stack_index++;
1975 }
1976
1977
void
__glXPopArrayState(__GLXattribute * state)
{
   struct array_state_vector *arrays = state->array_state;
   struct array_stack_state *stack;
   unsigned i;


   /* Decrement first so 'stack' points at the snapshot saved by the
    * matching __glXPushArrayState call.
    */
   arrays->stack_index--;
   stack = &arrays->stack[(arrays->stack_index * arrays->num_arrays)];

   /* Replay the saved pointer state through the regular gl*Pointer
    * entry points so that derived fields (opcode, element size, header)
    * are rebuilt consistently, then restore each enable flag.
    *
    * NOTE(review): there is no GL_VERTEX_ARRAY case here, so a pushed
    * vertex-array pointer does not appear to be restored -- verify
    * whether that is intentional.
    */
   for (i = 0; i < arrays->num_arrays; i++) {
      switch (stack[i].key) {
      case GL_NORMAL_ARRAY:
         __indirect_glNormalPointer(stack[i].data_type,
                                    stack[i].user_stride, stack[i].data);
         break;
      case GL_COLOR_ARRAY:
         __indirect_glColorPointer(stack[i].count,
                                   stack[i].data_type,
                                   stack[i].user_stride, stack[i].data);
         break;
      case GL_INDEX_ARRAY:
         __indirect_glIndexPointer(stack[i].data_type,
                                   stack[i].user_stride, stack[i].data);
         break;
      case GL_EDGE_FLAG_ARRAY:
         __indirect_glEdgeFlagPointer(stack[i].user_stride, stack[i].data);
         break;
      case GL_TEXTURE_COORD_ARRAY:
         /* glTexCoordPointer targets the active unit, so select the
          * saved unit before replaying the pointer.
          */
         arrays->active_texture_unit = stack[i].index;
         __indirect_glTexCoordPointer(stack[i].count,
                                      stack[i].data_type,
                                      stack[i].user_stride, stack[i].data);
         break;
      case GL_SECONDARY_COLOR_ARRAY:
         __indirect_glSecondaryColorPointer(stack[i].count,
                                            stack[i].data_type,
                                            stack[i].user_stride,
                                            stack[i].data);
         break;
      case GL_FOG_COORDINATE_ARRAY:
         __indirect_glFogCoordPointer(stack[i].data_type,
                                      stack[i].user_stride, stack[i].data);
         break;

      }

      __glXSetArrayEnable(state, stack[i].key, stack[i].index,
                          stack[i].enabled);
   }

   /* Restore the active texture unit last; the loop above may have
    * overwritten it while replaying texture-coordinate arrays.
    */
   arrays->active_texture_unit =
      arrays->active_texture_unit_stack[arrays->stack_index];
}