1 /*
2 * (C) Copyright IBM Corporation 2004, 2005
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * IBM,
20 * AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
22 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 */
25
26 #include <inttypes.h>
27 #include <assert.h>
28 #include <string.h>
29
30 #include "glxclient.h"
31 #include "indirect.h"
32 #include <GL/glxproto.h>
33 #include "glxextensions.h"
34 #include "indirect_vertex_array.h"
35 #include "indirect_vertex_array_priv.h"
36
37 #define __GLX_PAD(n) (((n)+3) & ~3)
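/* GLX render commands are padded to 4-byte boundaries, so, for example,
 * __GLX_PAD(13) evaluates to 16 while __GLX_PAD(12) stays 12.
 */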
38
39 /**
40 * \file indirect_vertex_array.c
41 * Implement GLX protocol for vertex arrays and vertex buffer objects.
42 *
43  * The most important function in this file is \c fill_array_info_cache.
44 * The \c array_state_vector contains a cache of the ARRAY_INFO data sent
45 * in the DrawArrays protocol. Certain operations, such as enabling or
46 * disabling an array, can invalidate this cache. \c fill_array_info_cache
47 * fills-in this data. Additionally, it examines the enabled state and
48  * other factors to determine what "version" of DrawArrays protocol can be
49 * used.
50 *
51  * Currently, only two versions of DrawArrays protocol are implemented. The
52 * first version is the "none" protocol. This is the fallback when the
53 * server does not support GL 1.1 / EXT_vertex_arrays. It is implemented
54 * by sending batches of immediate mode commands that are equivalent to the
55 * DrawArrays protocol.
56 *
57 * The other protocol that is currently implemented is the "old" protocol.
58 * This is the GL 1.1 DrawArrays protocol. The only difference between GL
59 * 1.1 and EXT_vertex_arrays is the opcode used for the DrawArrays command.
60 * This protocol is called "old" because the ARB is in the process of
61  * defining a new protocol, which will probably be called either "new" or
62 * "vbo", to support multiple texture coordinate arrays, generic attributes,
63 * and vertex buffer objects.
64 *
65 * \author Ian Romanick <ian.d.romanick@intel.com>
66 */
67
68 static void emit_DrawArrays_none( GLenum mode, GLint first, GLsizei count );
69 static void emit_DrawArrays_old ( GLenum mode, GLint first, GLsizei count );
70
71 static void emit_DrawElements_none( GLenum mode, GLsizei count, GLenum type,
72 const GLvoid *indices );
73 static void emit_DrawElements_old ( GLenum mode, GLsizei count, GLenum type,
74 const GLvoid *indices );
75
76
77 static GLubyte * emit_element_none( GLubyte * dst,
78 const struct array_state_vector * arrays, unsigned index );
79 static GLubyte * emit_element_old( GLubyte * dst,
80 const struct array_state_vector * arrays, unsigned index );
81 static struct array_state * get_array_entry(
82 const struct array_state_vector * arrays, GLenum key, unsigned index );
83 static void fill_array_info_cache( struct array_state_vector * arrays );
84 static GLboolean validate_mode(__GLXcontext *gc, GLenum mode);
85 static GLboolean validate_count(__GLXcontext *gc, GLsizei count);
86 static GLboolean validate_type(__GLXcontext *gc, GLenum type);
87
88
89 /**
90  * Table of sizes, in bytes, of the GL types. All of the type enums are in
91  * the range 0x1400 - 0x140F. That includes types added by extensions (e.g.,
92  * \c GL_HALF_FLOAT_NV). The elements of this table correspond to the
93 * type enums masked with 0x0f.
94 *
95 * \notes
96  * The size of \c GL_HALF_FLOAT_NV is not included (its table entry is zero).
97  * Neither are the sizes of \c GL_2_BYTES, \c GL_3_BYTES, or \c GL_4_BYTES.
98 */
99 const GLuint __glXTypeSize_table[16] = {
100 1, 1, 2, 2, 4, 4, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0
101 };
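/* Editorial note: the table is indexed with the type enum masked by 0x0f.
 * For example, GL_FLOAT (0x1406) maps to entry 6 and yields 4 bytes, and
 * GL_DOUBLE (0x140A) maps to entry 10 and yields 8.  Zero entries mark types
 * whose size is not provided here.
 */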
102
103
104 /**
105 * Free the per-context array state that was allocated with
106 * __glXInitVertexArrayState().
107 */
108 void
109 __glXFreeVertexArrayState( __GLXcontext * gc )
110 {
111 __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
112 struct array_state_vector* arrays = state->array_state;
113
114 if (arrays) {
115 if (arrays->stack) {
116 free(arrays->stack);
117 arrays->stack = NULL;
118 }
119 if (arrays->arrays) {
120 free(arrays->arrays);
121 arrays->arrays = NULL;
122 }
123 free(arrays);
124 arrays = NULL;
125 state->array_state = NULL;
126 }
127 }
128
129
130 /**
131 * Initialize vertex array state of a GLX context.
132 *
133 * \param gc GLX context whose vertex array state is to be initialized.
134 *
135 * \warning
136 * This function may only be called after __GLXcontext::gl_extension_bits,
137 * __GLXcontext::server_minor, and __GLXcontext::server_major have been
138 * initialized. These values are used to determine what vertex arrays are
139 * supported.
140 *
141 * \bug
142 * Return values from malloc are not properly tested.
143 */
144 void
145 __glXInitVertexArrayState( __GLXcontext * gc )
146 {
147 __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
148 struct array_state_vector * arrays;
149
150 unsigned array_count;
151 int texture_units = 1, vertex_program_attribs = 0;
152 unsigned i, j;
153
154 GLboolean got_fog = GL_FALSE;
155 GLboolean got_secondary_color = GL_FALSE;
156
157
158 arrays = calloc( 1, sizeof( struct array_state_vector ) );
159 state->array_state = arrays;
160
161 arrays->old_DrawArrays_possible = !state->NoDrawArraysProtocol;
162 arrays->new_DrawArrays_possible = GL_FALSE;
163 arrays->DrawArrays = NULL;
164
165 arrays->active_texture_unit = 0;
166
167
168 /* Determine how many arrays are actually needed. Only arrays that
169     * are supported by the server are created.  For example, if the server
170 * supports only 2 texture units, then only 2 texture coordinate arrays
171 * are created.
172 *
173 * At the very least, GL_VERTEX_ARRAY, GL_NORMAL_ARRAY,
174 * GL_COLOR_ARRAY, GL_INDEX_ARRAY, GL_TEXTURE_COORD_ARRAY, and
175 * GL_EDGE_FLAG_ARRAY are supported.
176 */
177
178 array_count = 5;
179
180 if ( __glExtensionBitIsEnabled( gc, GL_EXT_fog_coord_bit )
181 || (gc->server_major > 1) || (gc->server_minor >= 4) ) {
182 got_fog = GL_TRUE;
183 array_count++;
184 }
185
186 if ( __glExtensionBitIsEnabled( gc, GL_EXT_secondary_color_bit )
187 || (gc->server_major > 1) || (gc->server_minor >= 4) ) {
188 got_secondary_color = GL_TRUE;
189 array_count++;
190 }
191
192 if ( __glExtensionBitIsEnabled( gc, GL_ARB_multitexture_bit )
193 || (gc->server_major > 1) || (gc->server_minor >= 3) ) {
194 __indirect_glGetIntegerv( GL_MAX_TEXTURE_UNITS, & texture_units );
195 }
196
197 if ( __glExtensionBitIsEnabled( gc, GL_ARB_vertex_program_bit ) ) {
198 __indirect_glGetProgramivARB( GL_VERTEX_PROGRAM_ARB,
199 GL_MAX_PROGRAM_ATTRIBS_ARB,
200 & vertex_program_attribs );
201 }
202
203 arrays->num_texture_units = texture_units;
204 arrays->num_vertex_program_attribs = vertex_program_attribs;
205 array_count += texture_units + vertex_program_attribs;
206 arrays->num_arrays = array_count;
207 arrays->arrays = calloc( array_count, sizeof( struct array_state ) );
208
209 arrays->arrays[0].data_type = GL_FLOAT;
210 arrays->arrays[0].count = 3;
211 arrays->arrays[0].key = GL_NORMAL_ARRAY;
212 arrays->arrays[0].normalized = GL_TRUE;
213 arrays->arrays[0].old_DrawArrays_possible = GL_TRUE;
214
215 arrays->arrays[1].data_type = GL_FLOAT;
216 arrays->arrays[1].count = 4;
217 arrays->arrays[1].key = GL_COLOR_ARRAY;
218 arrays->arrays[1].normalized = GL_TRUE;
219 arrays->arrays[1].old_DrawArrays_possible = GL_TRUE;
220
221 arrays->arrays[2].data_type = GL_FLOAT;
222 arrays->arrays[2].count = 1;
223 arrays->arrays[2].key = GL_INDEX_ARRAY;
224 arrays->arrays[2].old_DrawArrays_possible = GL_TRUE;
225
226 arrays->arrays[3].data_type = GL_UNSIGNED_BYTE;
227 arrays->arrays[3].count = 1;
228 arrays->arrays[3].key = GL_EDGE_FLAG_ARRAY;
229 arrays->arrays[3].old_DrawArrays_possible = GL_TRUE;
230
231 for ( i = 0 ; i < texture_units ; i++ ) {
232 arrays->arrays[4 + i].data_type = GL_FLOAT;
233 arrays->arrays[4 + i].count = 4;
234 arrays->arrays[4 + i].key = GL_TEXTURE_COORD_ARRAY;
235
236 arrays->arrays[4 + i].old_DrawArrays_possible = (i == 0);
237 arrays->arrays[4 + i].index = i;
238
239 arrays->arrays[4 + i].header[1] = i + GL_TEXTURE0;
240 }
241
242 i = 4 + texture_units;
243
244 if ( got_fog ) {
245 arrays->arrays[i].data_type = GL_FLOAT;
246 arrays->arrays[i].count = 1;
247 arrays->arrays[i].key = GL_FOG_COORDINATE_ARRAY;
248 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
249 i++;
250 }
251
252 if ( got_secondary_color ) {
253 arrays->arrays[i].data_type = GL_FLOAT;
254 arrays->arrays[i].count = 3;
255 arrays->arrays[i].key = GL_SECONDARY_COLOR_ARRAY;
256 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
257 arrays->arrays[i].normalized = GL_TRUE;
258 i++;
259 }
260
261
262 for ( j = 0 ; j < vertex_program_attribs ; j++ ) {
263 const unsigned idx = (vertex_program_attribs - (j + 1));
264
265
266 arrays->arrays[idx + i].data_type = GL_FLOAT;
267 arrays->arrays[idx + i].count = 4;
268 arrays->arrays[idx + i].key = GL_VERTEX_ATTRIB_ARRAY_POINTER;
269
270 arrays->arrays[idx + i].old_DrawArrays_possible = 0;
271 arrays->arrays[idx + i].index = idx;
272
273 arrays->arrays[idx + i].header[1] = idx;
274 }
275
276 i += vertex_program_attribs;
277
278
279    /* Vertex array *must* be last because of the way that
280 * emit_DrawArrays_none works.
281 */
282
283 arrays->arrays[i].data_type = GL_FLOAT;
284 arrays->arrays[i].count = 4;
285 arrays->arrays[i].key = GL_VERTEX_ARRAY;
286 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
287
288 assert( (i + 1) == arrays->num_arrays );
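   /* Summary of the layout built above: normal, color, index, and edge-flag
    * arrays first, then one texture coordinate array per supported unit,
    * optionally fog coordinate and secondary color, the generic vertex
    * program attributes, and finally the vertex array itself (which must be
    * last; see the comment above).
    */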
289
290 arrays->stack_index = 0;
291 arrays->stack = malloc( sizeof( struct array_stack_state )
292 * arrays->num_arrays );
293 }
294
295
296 /**
297 * Calculate the size of a single vertex for the "none" protocol. This is
298 * essentially the size of all the immediate-mode commands required to
299 * implement the enabled vertex arrays.
300 */
301 static size_t
302 calculate_single_vertex_size_none( const struct array_state_vector * arrays )
303 {
304 size_t single_vertex_size = 0;
305 unsigned i;
306
307
308 for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
309 if ( arrays->arrays[i].enabled ) {
310 single_vertex_size += ((uint16_t *)arrays->arrays[i].header)[0];
311 }
312 }
313
314 return single_vertex_size;
315 }
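/* Illustrative example (hypothetical state, not from the original sources):
 * if only the vertex array is enabled as 3 floats, its header[0] is
 * __GLX_PAD(4 + 12) = 16, so each vertex emitted by the "none" protocol
 * costs 16 bytes -- a complete Vertex3fv render command.
 */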
316
317
318 /**
319 * Emit a single element using non-DrawArrays protocol.
320 */
321 GLubyte *
322 emit_element_none( GLubyte * dst,
323 const struct array_state_vector * arrays,
324 unsigned index )
325 {
326 unsigned i;
327
328
329 for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
330 if ( arrays->arrays[i].enabled ) {
331 const size_t offset = index * arrays->arrays[i].true_stride;
332
333 /* The generic attributes can have more data than is in the
334 * elements. This is because a vertex array can be a 2 element,
335 * normalized, unsigned short, but the "closest" immediate mode
336 * protocol is for a 4Nus. Since the sizes are small, the
337 * performance impact on modern processors should be negligible.
338 */
339 (void) memset( dst, 0,
340 ((uint16_t *)arrays->arrays[i].header)[0] );
341
342 (void) memcpy( dst, arrays->arrays[i].header,
343 arrays->arrays[i].header_size );
344
345 dst += arrays->arrays[i].header_size;
346
347 (void) memcpy( dst, ((GLubyte *) arrays->arrays[i].data) + offset,
348 arrays->arrays[i].element_size );
349
350 dst += __GLX_PAD( arrays->arrays[i].element_size );
351 }
352 }
353
354 return dst;
355 }
356
357
358 /**
359 * Emit a single element using "old" DrawArrays protocol from
360 * EXT_vertex_arrays / OpenGL 1.1.
361 */
362 GLubyte *
363 emit_element_old( GLubyte * dst,
364 const struct array_state_vector * arrays,
365 unsigned index )
366 {
367 unsigned i;
368
369
370 for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
371 if ( arrays->arrays[i].enabled ) {
372 const size_t offset = index * arrays->arrays[i].true_stride;
373
374 (void) memcpy( dst, ((GLubyte *) arrays->arrays[i].data) + offset,
375 arrays->arrays[i].element_size );
376
377 dst += __GLX_PAD( arrays->arrays[i].element_size );
378 }
379 }
380
381 return dst;
382 }
383
384
385 struct array_state *
386 get_array_entry( const struct array_state_vector * arrays,
387 GLenum key, unsigned index )
388 {
389 unsigned i;
390
391 for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
392 if ( (arrays->arrays[i].key == key)
393 && (arrays->arrays[i].index == index) ) {
394 return & arrays->arrays[i];
395 }
396 }
397
398 return NULL;
399 }
400
401
402 static GLboolean
403 allocate_array_info_cache( struct array_state_vector * arrays,
404 size_t required_size )
405 {
406 #define MAX_HEADER_SIZE 20
407 if ( arrays->array_info_cache_buffer_size < required_size ) {
408 GLubyte * temp = realloc( arrays->array_info_cache_base,
409 required_size + MAX_HEADER_SIZE );
410
411 if ( temp == NULL ) {
412 return GL_FALSE;
413 }
414
415 arrays->array_info_cache_base = temp;
416 arrays->array_info_cache = temp + MAX_HEADER_SIZE;
417 arrays->array_info_cache_buffer_size = required_size;
418 }
419
420 arrays->array_info_cache_size = required_size;
421 return GL_TRUE;
422 }
423
424
425 /**
426  * Fill the ARRAY_INFO cache and select which DrawArrays / DrawElements
 * protocol will be used, based on the arrays that are currently enabled.
 */
427 void
428 fill_array_info_cache( struct array_state_vector * arrays )
429 {
430 GLboolean old_DrawArrays_possible;
431 unsigned i;
432
433
434 /* Determine how many arrays are enabled.
435 */
436
437 arrays->enabled_client_array_count = 0;
438 old_DrawArrays_possible = arrays->old_DrawArrays_possible;
439 for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
440 if ( arrays->arrays[i].enabled ) {
441 arrays->enabled_client_array_count++;
442 old_DrawArrays_possible &= arrays->arrays[i].old_DrawArrays_possible;
443 }
444 }
445
446 if ( arrays->new_DrawArrays_possible ) {
447 assert( ! arrays->new_DrawArrays_possible );
448 }
449 else if ( old_DrawArrays_possible ) {
450 const size_t required_size = arrays->enabled_client_array_count * 12;
451 uint32_t * info;
452
453
454 if ( ! allocate_array_info_cache( arrays, required_size ) ) {
455 return;
456 }
457
458
459 info = (uint32_t *) arrays->array_info_cache;
460 for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
461 if ( arrays->arrays[i].enabled ) {
462 *(info++) = arrays->arrays[i].data_type;
463 *(info++) = arrays->arrays[i].count;
464 *(info++) = arrays->arrays[i].key;
465 }
466 }
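      /* Each enabled array contributes one ARRAY_INFO record of three 32-bit
       * words (data_type, count, key); this is where the "* 12" in
       * required_size above comes from.
       */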
467
468 arrays->DrawArrays = emit_DrawArrays_old;
469 arrays->DrawElements = emit_DrawElements_old;
470 }
471 else {
472 arrays->DrawArrays = emit_DrawArrays_none;
473 arrays->DrawElements = emit_DrawElements_none;
474 }
475
476 arrays->array_info_cache_valid = GL_TRUE;
477 }
478
479
480 /**
481 * Emit a \c glDrawArrays command using the "none" protocol. That is,
482  * emit immediate-mode commands that are equivalent to the requested
483 * \c glDrawArrays command. This is used with servers that don't support
484 * the OpenGL 1.1 / EXT_vertex_arrays DrawArrays protocol or in cases where
485 * vertex state is enabled that is not compatible with that protocol.
486 */
487 void
488 emit_DrawArrays_none( GLenum mode, GLint first, GLsizei count )
489 {
490 __GLXcontext *gc = __glXGetCurrentContext();
491 const __GLXattribute * state =
492 (const __GLXattribute *)(gc->client_state_private);
493 struct array_state_vector * arrays = state->array_state;
494
495 size_t single_vertex_size;
496 GLubyte * pc;
497 unsigned i;
498 static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
499 static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
500
501
502 single_vertex_size = calculate_single_vertex_size_none( arrays );
503
504 pc = gc->pc;
505
506 (void) memcpy( pc, begin_cmd, 4 );
507 *(int *)(pc + 4) = mode;
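   /* The Begin render command occupies 8 bytes on the wire: a 16-bit length
    * of 8, the 16-bit X_GLrop_Begin opcode, and the 32-bit primitive mode.
    */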
508
509 pc += 8;
510
511 for ( i = 0 ; i < count ; i++ ) {
512 if ( (pc + single_vertex_size) >= gc->bufEnd ) {
513 pc = __glXFlushRenderBuffer(gc, pc);
514 }
515
516 pc = emit_element_none( pc, arrays, first + i );
517 }
518
519 if ( (pc + 4) >= gc->bufEnd ) {
520 pc = __glXFlushRenderBuffer(gc, pc);
521 }
522
523 (void) memcpy( pc, end_cmd, 4 );
524 pc += 4;
525
526 gc->pc = pc;
527 if ( gc->pc > gc->limit ) {
528 (void) __glXFlushRenderBuffer(gc, gc->pc);
529 }
530 }
531
532
533 /**
534 * Emit the header data for the GL 1.1 / EXT_vertex_arrays DrawArrays
535 * protocol.
536 *
537 * \param gc GLX context.
538 * \param arrays Array state.
539 * \param elements_per_request Location to store the number of elements that
540 * can fit in a single Render / RenderLarge
541 * command.
542  * \param total_requests       Total number of requests for a RenderLarge
543 * command. If a Render command is used, this
544 * will be zero.
545 * \param mode Drawing mode.
546 * \param count Number of vertices.
547 *
548 * \returns
549 * A pointer to the buffer for array data.
550 */
551 static GLubyte *
552 emit_DrawArrays_header_old( __GLXcontext * gc,
553 struct array_state_vector * arrays,
554 size_t * elements_per_request,
555 unsigned int * total_requests,
556 GLenum mode, GLsizei count )
557 {
558 size_t command_size;
559 size_t single_vertex_size;
560 const unsigned header_size = 16;
561 unsigned i;
562 GLubyte * pc;
563
564
565 /* Determine the size of the whole command. This includes the header,
566 * the ARRAY_INFO data and the array data. Once this size is calculated,
567 * it will be known whether a Render or RenderLarge command is needed.
568 */
569
570 single_vertex_size = 0;
571 for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
572 if ( arrays->arrays[i].enabled ) {
573 single_vertex_size += __GLX_PAD( arrays->arrays[i].element_size );
574 }
575 }
576
577 command_size = arrays->array_info_cache_size + header_size
578 + (single_vertex_size * count);
579
580
581 /* Write the header for either a Render command or a RenderLarge
582 * command. After the header is written, write the ARRAY_INFO data.
583 */
584
585 if ( command_size > gc->maxSmallRenderCommandSize ) {
586       /* maxSize is the maximum amount of data that can be stuffed into a single
587 * packet. sz_xGLXRenderReq is added because bufSize is the maximum
588 * packet size minus sz_xGLXRenderReq.
589 */
590 const size_t maxSize = (gc->bufSize + sz_xGLXRenderReq)
591 - sz_xGLXRenderLargeReq;
592 unsigned vertex_requests;
593
594
595 /* Calculate the number of data packets that will be required to send
596        * the whole command.  To do this, the number of vertices that
597 * will fit in a single buffer must be calculated.
598 *
599 * The important value here is elements_per_request. This is the
600 * number of complete array elements that will fit in a single
601 * buffer. There may be some wasted space at the end of the buffer,
602        * but splitting elements across buffer boundaries would be painful.
603 */
604
605 elements_per_request[0] = maxSize / single_vertex_size;
606
607 vertex_requests = (count + elements_per_request[0] - 1)
608 / elements_per_request[0];
609
610 *total_requests = vertex_requests + 1;
611
612
613 __glXFlushRenderBuffer(gc, gc->pc);
614
615 command_size += 4;
616
617 pc = ((GLubyte *) arrays->array_info_cache) - (header_size + 4);
618 *(uint32_t *)(pc + 0) = command_size;
619 *(uint32_t *)(pc + 4) = X_GLrop_DrawArrays;
620 *(uint32_t *)(pc + 8) = count;
621 *(uint32_t *)(pc + 12) = arrays->enabled_client_array_count;
622 *(uint32_t *)(pc + 16) = mode;
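      /* RenderLarge data begins with a 32-bit length and a 32-bit opcode
       * (hence the "command_size += 4" above), whereas the small Render path
       * below packs the length and opcode into two 16-bit fields.
       */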
623
624 __glXSendLargeChunk( gc, 1, *total_requests, pc,
625 header_size + 4 + arrays->array_info_cache_size );
626
627 pc = gc->pc;
628 }
629 else {
630 if ( (gc->pc + command_size) >= gc->bufEnd ) {
631 (void) __glXFlushRenderBuffer(gc, gc->pc);
632 }
633
634 pc = gc->pc;
635 *(uint16_t *)(pc + 0) = command_size;
636 *(uint16_t *)(pc + 2) = X_GLrop_DrawArrays;
637 *(uint32_t *)(pc + 4) = count;
638 *(uint32_t *)(pc + 8) = arrays->enabled_client_array_count;
639 *(uint32_t *)(pc + 12) = mode;
640
641 pc += header_size;
642
643 (void) memcpy( pc, arrays->array_info_cache,
644 arrays->array_info_cache_size );
645 pc += arrays->array_info_cache_size;
646
647 *elements_per_request = count;
648 *total_requests = 0;
649 }
650
651
652 return pc;
653 }
654
655
656 /**
657  * Emit a \c glDrawArrays command using the "old" GL 1.1 /
 * EXT_vertex_arrays DrawArrays protocol.
 */
658 void
659 emit_DrawArrays_old( GLenum mode, GLint first, GLsizei count )
660 {
661 __GLXcontext *gc = __glXGetCurrentContext();
662 const __GLXattribute * state =
663 (const __GLXattribute *)(gc->client_state_private);
664 struct array_state_vector * arrays = state->array_state;
665
666 GLubyte * pc;
667 size_t elements_per_request;
668 unsigned total_requests = 0;
669 unsigned i;
670 size_t total_sent = 0;
671
672
673 pc = emit_DrawArrays_header_old( gc, arrays, & elements_per_request,
674 & total_requests, mode, count);
675
676
677 /* Write the arrays.
678 */
679
680 if ( total_requests == 0 ) {
681 assert( elements_per_request >= count );
682
683 for ( i = 0 ; i < count ; i++ ) {
684 pc = emit_element_old( pc, arrays, i + first );
685 }
686
687 assert( pc <= gc->bufEnd );
688
689 gc->pc = pc;
690 if ( gc->pc > gc->limit ) {
691 (void) __glXFlushRenderBuffer(gc, gc->pc);
692 }
693 }
694 else {
695 unsigned req;
696
697
698 for ( req = 2 ; req <= total_requests ; req++ ) {
699 if ( count < elements_per_request ) {
700 elements_per_request = count;
701 }
702
703 pc = gc->pc;
704 for ( i = 0 ; i < elements_per_request ; i++ ) {
705 pc = emit_element_old( pc, arrays, i + first );
706 }
707
708 first += elements_per_request;
709
710 total_sent += (size_t) (pc - gc->pc);
711 __glXSendLargeChunk( gc, req, total_requests, gc->pc,
712 pc - gc->pc );
713
714 count -= elements_per_request;
715 }
716 }
717 }
718
719
720 void
721 emit_DrawElements_none( GLenum mode, GLsizei count, GLenum type,
722 const GLvoid *indices )
723 {
724 __GLXcontext *gc = __glXGetCurrentContext();
725 const __GLXattribute * state =
726 (const __GLXattribute *)(gc->client_state_private);
727 struct array_state_vector * arrays = state->array_state;
728 static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
729 static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
730
731 GLubyte * pc;
732 size_t single_vertex_size;
733 unsigned i;
734
735
736 single_vertex_size = calculate_single_vertex_size_none( arrays );
737
738
739 if ( (gc->pc + single_vertex_size) >= gc->bufEnd ) {
740 gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
741 }
742
743 pc = gc->pc;
744
745 (void) memcpy( pc, begin_cmd, 4 );
746 *(int *)(pc + 4) = mode;
747
748 pc += 8;
749
750 for ( i = 0 ; i < count ; i++ ) {
751 unsigned index = 0;
752
753 if ( (pc + single_vertex_size) >= gc->bufEnd ) {
754 pc = __glXFlushRenderBuffer(gc, pc);
755 }
756
757 switch( type ) {
758 case GL_UNSIGNED_INT:
759 index = (unsigned) (((GLuint *) indices)[i]);
760 break;
761 case GL_UNSIGNED_SHORT:
762 index = (unsigned) (((GLushort *) indices)[i]);
763 break;
764 case GL_UNSIGNED_BYTE:
765 index = (unsigned) (((GLubyte *) indices)[i]);
766 break;
767 }
768 pc = emit_element_none( pc, arrays, index );
769 }
770
771 if ( (pc + 4) >= gc->bufEnd ) {
772 pc = __glXFlushRenderBuffer(gc, pc);
773 }
774
775 (void) memcpy( pc, end_cmd, 4 );
776 pc += 4;
777
778 gc->pc = pc;
779 if ( gc->pc > gc->limit ) {
780 (void) __glXFlushRenderBuffer(gc, gc->pc);
781 }
782 }
783
784
785 /**
786  * Emit a \c glDrawElements command using the "old" GL 1.1 /
 * EXT_vertex_arrays DrawArrays protocol.
 */
787 void
788 emit_DrawElements_old( GLenum mode, GLsizei count, GLenum type,
789 const GLvoid *indices )
790 {
791 __GLXcontext *gc = __glXGetCurrentContext();
792 const __GLXattribute * state =
793 (const __GLXattribute *)(gc->client_state_private);
794 struct array_state_vector * arrays = state->array_state;
795
796 GLubyte * pc;
797 size_t elements_per_request;
798 unsigned total_requests = 0;
799 unsigned i;
800 unsigned req;
801 unsigned req_element=0;
802
803
804 pc = emit_DrawArrays_header_old( gc, arrays, & elements_per_request,
805 & total_requests, mode, count);
806
807
808 /* Write the arrays.
809 */
810
811 req = 2;
812 while ( count > 0 ) {
813 if ( count < elements_per_request ) {
814 elements_per_request = count;
815 }
816
817 switch( type ) {
818 case GL_UNSIGNED_INT: {
819 const GLuint * ui_ptr = (const GLuint *) indices + req_element;
820
821 for ( i = 0 ; i < elements_per_request ; i++ ) {
822 const GLint index = (GLint) *(ui_ptr++);
823 pc = emit_element_old( pc, arrays, index );
824 }
825 break;
826 }
827 case GL_UNSIGNED_SHORT: {
828 const GLushort * us_ptr = (const GLushort *) indices + req_element;
829
830 for ( i = 0 ; i < elements_per_request ; i++ ) {
831 const GLint index = (GLint) *(us_ptr++);
832 pc = emit_element_old( pc, arrays, index );
833 }
834 break;
835 }
836 case GL_UNSIGNED_BYTE: {
837 const GLubyte * ub_ptr = (const GLubyte *) indices + req_element;
838
839 for ( i = 0 ; i < elements_per_request ; i++ ) {
840 const GLint index = (GLint) *(ub_ptr++);
841 pc = emit_element_old( pc, arrays, index );
842 }
843 break;
844 }
845 }
846
847 if ( total_requests != 0 ) {
848 __glXSendLargeChunk( gc, req, total_requests, gc->pc,
849 pc - gc->pc );
850 pc = gc->pc;
851 req++;
852 }
853
854 count -= elements_per_request;
855 req_element += elements_per_request;
856 }
857
858
859 assert( (total_requests == 0) || ((req - 1) == total_requests) );
860
861 if ( total_requests == 0 ) {
862 assert( pc <= gc->bufEnd );
863
864 gc->pc = pc;
865 if ( gc->pc > gc->limit ) {
866 (void) __glXFlushRenderBuffer(gc, gc->pc);
867 }
868 }
869 }
870
871
872 /**
873  * Validate that the \c mode parameter to \c glDrawArrays, et al. is valid.
874 * If it is not valid, then an error code is set in the GLX context.
875 *
876 * \returns
877  * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
878 */
879 static GLboolean
880 validate_mode(__GLXcontext *gc, GLenum mode)
881 {
882 switch(mode) {
883 case GL_POINTS:
884 case GL_LINE_STRIP:
885 case GL_LINE_LOOP:
886 case GL_LINES:
887 case GL_TRIANGLE_STRIP:
888 case GL_TRIANGLE_FAN:
889 case GL_TRIANGLES:
890 case GL_QUAD_STRIP:
891 case GL_QUADS:
892 case GL_POLYGON:
893 break;
894 default:
895 __glXSetError(gc, GL_INVALID_ENUM);
896 return GL_FALSE;
897 }
898
899 return GL_TRUE;
900 }
901
902
903 /**
904  * Validate that the \c count parameter to \c glDrawArrays, et al. is valid.
905 * A value less than zero is invalid and will result in \c GL_INVALID_VALUE
906 * being set. A value of zero will not result in an error being set, but
907 * will result in \c GL_FALSE being returned.
908 *
909 * \returns
910 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
911 */
912 static GLboolean
913 validate_count(__GLXcontext *gc, GLsizei count)
914 {
915 if (count < 0) {
916 __glXSetError(gc, GL_INVALID_VALUE);
917 }
918
919 return (count > 0);
920 }
921
922
923 /**
924  * Validate that the \c type parameter to \c glDrawElements, et al. is
925 * valid. Only \c GL_UNSIGNED_BYTE, \c GL_UNSIGNED_SHORT, and
926 * \c GL_UNSIGNED_INT are valid.
927 *
928 * \returns
929 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
930 */
931 static GLboolean validate_type(__GLXcontext *gc, GLenum type)
932 {
933 switch( type ) {
934 case GL_UNSIGNED_INT:
935 case GL_UNSIGNED_SHORT:
936 case GL_UNSIGNED_BYTE:
937 return GL_TRUE;
938 default:
939 __glXSetError(gc, GL_INVALID_ENUM);
940 return GL_FALSE;
941 }
942 }
943
944
945 void __indirect_glDrawArrays(GLenum mode, GLint first, GLsizei count)
946 {
947 __GLXcontext *gc = __glXGetCurrentContext();
948 const __GLXattribute * state =
949 (const __GLXattribute *)(gc->client_state_private);
950 struct array_state_vector * arrays = state->array_state;
951
952
953 if ( validate_mode(gc, mode) && validate_count(gc, count) ) {
954 if ( ! arrays->array_info_cache_valid ) {
955 fill_array_info_cache( arrays );
956 }
957
958 arrays->DrawArrays(mode, first, count);
959 }
960 }
961
962
963 void __indirect_glArrayElement(GLint index)
964 {
965 __GLXcontext *gc = __glXGetCurrentContext();
966 const __GLXattribute * state =
967 (const __GLXattribute *)(gc->client_state_private);
968 struct array_state_vector * arrays = state->array_state;
969
970 size_t single_vertex_size;
971
972
973 single_vertex_size = calculate_single_vertex_size_none( arrays );
974
975 if ( (gc->pc + single_vertex_size) >= gc->bufEnd ) {
976 gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
977 }
978
979 gc->pc = emit_element_none( gc->pc, arrays, index );
980
981 if ( gc->pc > gc->limit ) {
982 (void) __glXFlushRenderBuffer(gc, gc->pc);
983 }
984 }
985
986
987 void __indirect_glDrawElements(GLenum mode, GLsizei count, GLenum type,
988 const GLvoid *indices)
989 {
990 __GLXcontext *gc = __glXGetCurrentContext();
991 const __GLXattribute * state =
992 (const __GLXattribute *)(gc->client_state_private);
993 struct array_state_vector * arrays = state->array_state;
994
995
996 if ( validate_mode(gc, mode) && validate_count(gc, count)
997 && validate_type(gc, type) ) {
998 if ( ! arrays->array_info_cache_valid ) {
999 fill_array_info_cache( arrays );
1000 }
1001
1002 arrays->DrawElements(mode, count, type, indices);
1003 }
1004 }
1005
1006
1007 void __indirect_glDrawRangeElements(GLenum mode, GLuint start, GLuint end,
1008 GLsizei count, GLenum type,
1009 const GLvoid *indices)
1010 {
1011 __GLXcontext *gc = __glXGetCurrentContext();
1012 const __GLXattribute * state =
1013 (const __GLXattribute *)(gc->client_state_private);
1014 struct array_state_vector * arrays = state->array_state;
1015
1016
1017 if ( validate_mode(gc, mode) && validate_count(gc, count)
1018 && validate_type(gc, type) ) {
1019 if (end < start) {
1020 __glXSetError(gc, GL_INVALID_VALUE);
1021 return;
1022 }
1023
1024 if ( ! arrays->array_info_cache_valid ) {
1025 fill_array_info_cache( arrays );
1026 }
1027
1028 arrays->DrawElements(mode, count, type, indices);
1029 }
1030 }
1031
1032
1033 void __indirect_glMultiDrawArraysEXT(GLenum mode, GLint *first, GLsizei *count,
1034 GLsizei primcount)
1035 {
1036 __GLXcontext *gc = __glXGetCurrentContext();
1037 const __GLXattribute * state =
1038 (const __GLXattribute *)(gc->client_state_private);
1039 struct array_state_vector * arrays = state->array_state;
1040 GLsizei i;
1041
1042
1043 if ( validate_mode(gc, mode) ) {
1044 if ( ! arrays->array_info_cache_valid ) {
1045 fill_array_info_cache( arrays );
1046 }
1047
1048 for ( i = 0 ; i < primcount ; i++ ) {
1049 if ( validate_count( gc, count[i] ) ) {
1050 arrays->DrawArrays(mode, first[i], count[i]);
1051 }
1052 }
1053 }
1054 }
1055
1056
1057 void __indirect_glMultiDrawElementsEXT(GLenum mode, const GLsizei *count,
1058 GLenum type, const GLvoid ** indices,
1059 GLsizei primcount)
1060 {
1061 __GLXcontext *gc = __glXGetCurrentContext();
1062 const __GLXattribute * state =
1063 (const __GLXattribute *)(gc->client_state_private);
1064 struct array_state_vector * arrays = state->array_state;
1065 GLsizei i;
1066
1067
1068 if ( validate_mode(gc, mode) && validate_type(gc, type) ) {
1069 if ( ! arrays->array_info_cache_valid ) {
1070 fill_array_info_cache( arrays );
1071 }
1072
1073 for ( i = 0 ; i < primcount ; i++ ) {
1074 if ( validate_count( gc, count[i] ) ) {
1075 arrays->DrawElements(mode, count[i], type, indices[i]);
1076 }
1077 }
1078 }
1079 }
1080
1081
1082 #define COMMON_ARRAY_DATA_INIT(a, PTR, TYPE, STRIDE, COUNT, NORMALIZED, HDR_SIZE, OPCODE) \
1083 do { \
1084 (a)->data = PTR; \
1085 (a)->data_type = TYPE; \
1086 (a)->user_stride = STRIDE; \
1087 (a)->count = COUNT; \
1088 (a)->normalized = NORMALIZED; \
1089 \
1090 (a)->element_size = __glXTypeSize( TYPE ) * COUNT; \
1091 (a)->true_stride = (STRIDE == 0) \
1092 ? (a)->element_size : STRIDE; \
1093 \
1094 (a)->header_size = HDR_SIZE; \
1095 ((uint16_t *) (a)->header)[0] = __GLX_PAD((a)->header_size + (a)->element_size); \
1096 ((uint16_t *) (a)->header)[1] = OPCODE; \
1097 } while(0)
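/* Illustrative example (hypothetical call, not from the original sources):
 * for glColorPointer(4, GL_FLOAT, 0, ptr), the macro records
 * element_size = 4 * 4 = 16, true_stride = 16 (tightly packed), and
 * header[0] = __GLX_PAD(4 + 16) = 20 with header[1] = X_GLrop_Color4fv.
 */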
1098
1099
1100 void __indirect_glVertexPointer( GLint size, GLenum type, GLsizei stride,
1101 const GLvoid * pointer )
1102 {
1103 static const uint16_t short_ops[5] = {
1104 0, 0, X_GLrop_Vertex2sv, X_GLrop_Vertex3sv, X_GLrop_Vertex4sv
1105 };
1106 static const uint16_t int_ops[5] = {
1107 0, 0, X_GLrop_Vertex2iv, X_GLrop_Vertex3iv, X_GLrop_Vertex4iv
1108 };
1109 static const uint16_t float_ops[5] = {
1110 0, 0, X_GLrop_Vertex2fv, X_GLrop_Vertex3fv, X_GLrop_Vertex4fv
1111 };
1112 static const uint16_t double_ops[5] = {
1113 0, 0, X_GLrop_Vertex2dv, X_GLrop_Vertex3dv, X_GLrop_Vertex4dv
1114 };
1115 uint16_t opcode;
1116 __GLXcontext *gc = __glXGetCurrentContext();
1117 __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
1118 struct array_state_vector * arrays = state->array_state;
1119 struct array_state * a;
1120
1121
1122 if (size < 2 || size > 4 || stride < 0) {
1123 __glXSetError(gc, GL_INVALID_VALUE);
1124 return;
1125 }
1126
1127 switch ( type ) {
1128 case GL_SHORT: opcode = short_ops[size]; break;
1129 case GL_INT: opcode = int_ops[size]; break;
1130 case GL_FLOAT: opcode = float_ops[size]; break;
1131 case GL_DOUBLE: opcode = double_ops[size]; break;
1132 default:
1133 __glXSetError(gc, GL_INVALID_ENUM);
1134 return;
1135 }
1136
1137 a = get_array_entry( arrays, GL_VERTEX_ARRAY, 0 );
1138 assert( a != NULL );
1139 COMMON_ARRAY_DATA_INIT( a, pointer, type, stride, size, GL_FALSE, 4,
1140 opcode );
1141
1142 if ( a->enabled ) {
1143 arrays->array_info_cache_valid = GL_FALSE;
1144 }
1145 }
1146
1147
1148 void __indirect_glNormalPointer( GLenum type, GLsizei stride,
1149 const GLvoid * pointer )
1150 {
1151 uint16_t opcode;
1152 __GLXcontext *gc = __glXGetCurrentContext();
1153 __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
1154 struct array_state_vector * arrays = state->array_state;
1155 struct array_state * a;
1156
1157
1158 if (stride < 0) {
1159 __glXSetError(gc, GL_INVALID_VALUE);
1160 return;
1161 }
1162
1163 switch ( type ) {
1164 case GL_BYTE: opcode = X_GLrop_Normal3bv; break;
1165 case GL_SHORT: opcode = X_GLrop_Normal3sv; break;
1166 case GL_INT: opcode = X_GLrop_Normal3iv; break;
1167 case GL_FLOAT: opcode = X_GLrop_Normal3fv; break;
1168 case GL_DOUBLE: opcode = X_GLrop_Normal3dv; break;
1169 default:
1170 __glXSetError(gc, GL_INVALID_ENUM);
1171 return;
1172 }
1173
1174 a = get_array_entry( arrays, GL_NORMAL_ARRAY, 0 );
1175 assert( a != NULL );
1176 COMMON_ARRAY_DATA_INIT( a, pointer, type, stride, 3, GL_TRUE, 4,
1177 opcode );
1178
1179 if ( a->enabled ) {
1180 arrays->array_info_cache_valid = GL_FALSE;
1181 }
1182 }
1183
1184
1185 void __indirect_glColorPointer( GLint size, GLenum type, GLsizei stride,
1186 const GLvoid * pointer )
1187 {
1188 static const uint16_t byte_ops[5] = {
1189 0, 0, 0, X_GLrop_Color3bv, X_GLrop_Color4bv
1190 };
1191 static const uint16_t ubyte_ops[5] = {
1192 0, 0, 0, X_GLrop_Color3ubv, X_GLrop_Color4ubv
1193 };
1194 static const uint16_t short_ops[5] = {
1195 0, 0, 0, X_GLrop_Color3sv, X_GLrop_Color4sv
1196 };
1197 static const uint16_t ushort_ops[5] = {
1198 0, 0, 0, X_GLrop_Color3usv, X_GLrop_Color4usv
1199 };
1200 static const uint16_t int_ops[5] = {
1201 0, 0, 0, X_GLrop_Color3iv, X_GLrop_Color4iv
1202 };
1203 static const uint16_t uint_ops[5] = {
1204 0, 0, 0, X_GLrop_Color3uiv, X_GLrop_Color4uiv
1205 };
1206 static const uint16_t float_ops[5] = {
1207 0, 0, 0, X_GLrop_Color3fv, X_GLrop_Color4fv
1208 };
1209 static const uint16_t double_ops[5] = {
1210 0, 0, 0, X_GLrop_Color3dv, X_GLrop_Color4dv
1211 };
1212 uint16_t opcode;
1213 __GLXcontext *gc = __glXGetCurrentContext();
1214 __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
1215 struct array_state_vector * arrays = state->array_state;
1216 struct array_state * a;
1217
1218
1219 if (size < 3 || size > 4 || stride < 0) {
1220 __glXSetError(gc, GL_INVALID_VALUE);
1221 return;
1222 }
1223
1224 switch ( type ) {
1225 case GL_BYTE: opcode = byte_ops[size]; break;
1226 case GL_UNSIGNED_BYTE: opcode = ubyte_ops[size]; break;
1227 case GL_SHORT: opcode = short_ops[size]; break;
1228 case GL_UNSIGNED_SHORT: opcode = ushort_ops[size]; break;
1229 case GL_INT: opcode = int_ops[size]; break;
1230 case GL_UNSIGNED_INT: opcode = uint_ops[size]; break;
1231 case GL_FLOAT: opcode = float_ops[size]; break;
1232 case GL_DOUBLE: opcode = double_ops[size]; break;
1233 default:
1234 __glXSetError(gc, GL_INVALID_ENUM);
1235 return;
1236 }
1237
1238 a = get_array_entry( arrays, GL_COLOR_ARRAY, 0 );
1239 assert( a != NULL );
1240 COMMON_ARRAY_DATA_INIT( a, pointer, type, stride, size, GL_TRUE, 4,
1241 opcode );
1242
1243 if ( a->enabled ) {
1244 arrays->array_info_cache_valid = GL_FALSE;
1245 }
1246 }
1247
1248
1249 void __indirect_glIndexPointer( GLenum type, GLsizei stride,
1250 const GLvoid * pointer )
1251 {
1252 uint16_t opcode;
1253 __GLXcontext *gc = __glXGetCurrentContext();
1254 __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
1255 struct array_state_vector * arrays = state->array_state;
1256 struct array_state * a;
1257
1258
1259 if (stride < 0) {
1260 __glXSetError(gc, GL_INVALID_VALUE);
1261 return;
1262 }
1263
1264 switch ( type ) {
1265 case GL_UNSIGNED_BYTE: opcode = X_GLrop_Indexubv; break;
1266 case GL_SHORT: opcode = X_GLrop_Indexsv; break;
1267 case GL_INT: opcode = X_GLrop_Indexiv; break;
1268 case GL_FLOAT: opcode = X_GLrop_Indexfv; break;
1269 case GL_DOUBLE: opcode = X_GLrop_Indexdv; break;
1270 default:
1271 __glXSetError(gc, GL_INVALID_ENUM);
1272 return;
1273 }
1274
1275 a = get_array_entry( arrays, GL_INDEX_ARRAY, 0 );
1276 assert( a != NULL );
1277 COMMON_ARRAY_DATA_INIT( a, pointer, type, stride, 1, GL_FALSE, 4,
1278 opcode );
1279
1280 if ( a->enabled ) {
1281 arrays->array_info_cache_valid = GL_FALSE;
1282 }
1283 }
1284
1285
1286 void __indirect_glEdgeFlagPointer( GLsizei stride, const GLvoid * pointer )
1287 {
1288 __GLXcontext *gc = __glXGetCurrentContext();
1289 __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
1290 struct array_state_vector * arrays = state->array_state;
1291 struct array_state * a;
1292
1293
1294 if (stride < 0) {
1295 __glXSetError(gc, GL_INVALID_VALUE);
1296 return;
1297 }
1298
1299
1300 a = get_array_entry( arrays, GL_EDGE_FLAG_ARRAY, 0 );
1301 assert( a != NULL );
1302 COMMON_ARRAY_DATA_INIT( a, pointer, GL_UNSIGNED_BYTE, stride, 1, GL_FALSE,
1303 4, X_GLrop_EdgeFlagv );
1304
1305 if ( a->enabled ) {
1306 arrays->array_info_cache_valid = GL_FALSE;
1307 }
1308 }
1309
1310
1311 void __indirect_glTexCoordPointer( GLint size, GLenum type, GLsizei stride,
1312 const GLvoid * pointer )
1313 {
1314 static const uint16_t short_ops[5] = {
1315 0, X_GLrop_TexCoord1sv, X_GLrop_TexCoord2sv, X_GLrop_TexCoord3sv, X_GLrop_TexCoord4sv
1316 };
1317 static const uint16_t int_ops[5] = {
1318 0, X_GLrop_TexCoord1iv, X_GLrop_TexCoord2iv, X_GLrop_TexCoord3iv, X_GLrop_TexCoord4iv
1319 };
1320 static const uint16_t float_ops[5] = {
1321 0, X_GLrop_TexCoord1dv, X_GLrop_TexCoord2fv, X_GLrop_TexCoord3fv, X_GLrop_TexCoord4fv
1322 };
1323 static const uint16_t double_ops[5] = {
1324 0, X_GLrop_TexCoord1dv, X_GLrop_TexCoord2dv, X_GLrop_TexCoord3dv, X_GLrop_TexCoord4dv
1325 };
1326
1327 static const uint16_t mshort_ops[5] = {
1328 0, X_GLrop_MultiTexCoord1svARB, X_GLrop_MultiTexCoord2svARB, X_GLrop_MultiTexCoord3svARB, X_GLrop_MultiTexCoord4svARB
1329 };
1330 static const uint16_t mint_ops[5] = {
1331 0, X_GLrop_MultiTexCoord1ivARB, X_GLrop_MultiTexCoord2ivARB, X_GLrop_MultiTexCoord3ivARB, X_GLrop_MultiTexCoord4ivARB
1332 };
1333 static const uint16_t mfloat_ops[5] = {
1334 0, X_GLrop_MultiTexCoord1dvARB, X_GLrop_MultiTexCoord2fvARB, X_GLrop_MultiTexCoord3fvARB, X_GLrop_MultiTexCoord4fvARB
1335 };
1336 static const uint16_t mdouble_ops[5] = {
1337 0, X_GLrop_MultiTexCoord1dvARB, X_GLrop_MultiTexCoord2dvARB, X_GLrop_MultiTexCoord3dvARB, X_GLrop_MultiTexCoord4dvARB
1338 };
1339
1340 uint16_t opcode;
1341 __GLXcontext *gc = __glXGetCurrentContext();
1342 __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
1343 struct array_state_vector * arrays = state->array_state;
1344 struct array_state * a;
1345 unsigned header_size;
1346 unsigned index;
1347
1348
1349 if (size < 1 || size > 4 || stride < 0) {
1350 __glXSetError(gc, GL_INVALID_VALUE);
1351 return;
1352 }
1353
1354 index = arrays->active_texture_unit;
1355 if ( index == 0 ) {
1356 switch ( type ) {
1357 case GL_SHORT: opcode = short_ops[size]; break;
1358 case GL_INT: opcode = int_ops[size]; break;
1359 case GL_FLOAT: opcode = float_ops[size]; break;
1360 case GL_DOUBLE: opcode = double_ops[size]; break;
1361 default:
1362 __glXSetError(gc, GL_INVALID_ENUM);
1363 return;
1364 }
1365
1366 header_size = 4;
1367 }
1368 else {
1369 switch ( type ) {
1370 case GL_SHORT: opcode = mshort_ops[size]; break;
1371 case GL_INT: opcode = mint_ops[size]; break;
1372 case GL_FLOAT: opcode = mfloat_ops[size]; break;
1373 case GL_DOUBLE: opcode = mdouble_ops[size]; break;
1374 default:
1375 __glXSetError(gc, GL_INVALID_ENUM);
1376 return;
1377 }
1378
1379 header_size = 8;
1380 }
1381
1382 a = get_array_entry( arrays, GL_TEXTURE_COORD_ARRAY, index );
1383 assert( a != NULL );
1384 COMMON_ARRAY_DATA_INIT( a, pointer, type, stride, size, GL_FALSE,
1385 header_size, opcode );
1386
1387 if ( a->enabled ) {
1388 arrays->array_info_cache_valid = GL_FALSE;
1389 }
1390 }
1391
1392
1393 void __indirect_glSecondaryColorPointerEXT( GLint size, GLenum type, GLsizei stride,
1394 const GLvoid * pointer )
1395 {
1396 uint16_t opcode;
1397 __GLXcontext *gc = __glXGetCurrentContext();
1398 __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
1399 struct array_state_vector * arrays = state->array_state;
1400 struct array_state * a;
1401
1402
1403 if (size != 3 || stride < 0) {
1404 __glXSetError(gc, GL_INVALID_VALUE);
1405 return;
1406 }
1407
1408 switch ( type ) {
1409 case GL_BYTE: opcode = 4126; break;
1410 case GL_UNSIGNED_BYTE: opcode = 4131; break;
1411 case GL_SHORT: opcode = 4127; break;
1412 case GL_UNSIGNED_SHORT: opcode = 4132; break;
1413 case GL_INT: opcode = 4128; break;
1414 case GL_UNSIGNED_INT: opcode = 4133; break;
1415 case GL_FLOAT: opcode = 4129; break;
1416 case GL_DOUBLE: opcode = 4130; break;
1417 default:
1418 __glXSetError(gc, GL_INVALID_ENUM);
1419 return;
1420 }
1421
1422 a = get_array_entry( arrays, GL_SECONDARY_COLOR_ARRAY, 0 );
1423 if ( a == NULL ) {
1424 __glXSetError(gc, GL_INVALID_OPERATION);
1425 return;
1426 }
1427
1428 COMMON_ARRAY_DATA_INIT( a, pointer, type, stride, size, GL_TRUE, 4,
1429 opcode );
1430
1431 if ( a->enabled ) {
1432 arrays->array_info_cache_valid = GL_FALSE;
1433 }
1434 }
1435
1436
1437 void __indirect_glFogCoordPointerEXT( GLenum type, GLsizei stride,
1438 const GLvoid * pointer )
1439 {
1440 uint16_t opcode;
1441 __GLXcontext *gc = __glXGetCurrentContext();
1442 __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
1443 struct array_state_vector * arrays = state->array_state;
1444 struct array_state * a;
1445
1446
1447 if (stride < 0) {
1448 __glXSetError(gc, GL_INVALID_VALUE);
1449 return;
1450 }
1451
1452 switch ( type ) {
1453 case GL_FLOAT: opcode = 4124; break;
1454 case GL_DOUBLE: opcode = 4125; break;
1455 default:
1456 __glXSetError(gc, GL_INVALID_ENUM);
1457 return;
1458 }
1459
1460 a = get_array_entry( arrays, GL_FOG_COORD_ARRAY, 0 );
1461 if ( a == NULL ) {
1462 __glXSetError(gc, GL_INVALID_OPERATION);
1463 return;
1464 }
1465
1466 COMMON_ARRAY_DATA_INIT( a, pointer, type, stride, 1, GL_FALSE, 4,
1467 opcode );
1468
1469 if ( a->enabled ) {
1470 arrays->array_info_cache_valid = GL_FALSE;
1471 }
1472 }
1473
1474
1475 void __indirect_glVertexAttribPointerARB(GLuint index, GLint size,
1476 GLenum type, GLboolean normalized,
1477 GLsizei stride,
1478 const GLvoid * pointer)
1479 {
1480 static const uint16_t short_ops[5] = { 0, 4189, 4190, 4191, 4192 };
1481 static const uint16_t float_ops[5] = { 0, 4193, 4194, 4195, 4196 };
1482 static const uint16_t double_ops[5] = { 0, 4197, 4198, 4199, 4200 };
1483
1484 uint16_t opcode;
1485 __GLXcontext *gc = __glXGetCurrentContext();
1486 __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
1487 struct array_state_vector * arrays = state->array_state;
1488 struct array_state * a;
1489 unsigned true_immediate_count;
1490 unsigned true_immediate_size;
1491
1492
1493 if ( (size < 1) || (size > 4) || (stride < 0)
1494 || (index > arrays->num_vertex_program_attribs) ){
1495 __glXSetError(gc, GL_INVALID_VALUE);
1496 return;
1497 }
1498
1499 if ( normalized && (type != GL_FLOAT) && (type != GL_DOUBLE)) {
1500 switch( type ) {
1501 case GL_BYTE: opcode = X_GLrop_VertexAttrib4NbvARB; break;
1502 case GL_UNSIGNED_BYTE: opcode = X_GLrop_VertexAttrib4NubvARB; break;
1503 case GL_SHORT: opcode = X_GLrop_VertexAttrib4NsvARB; break;
1504 case GL_UNSIGNED_SHORT: opcode = X_GLrop_VertexAttrib4NusvARB; break;
1505 case GL_INT: opcode = X_GLrop_VertexAttrib4NivARB; break;
1506 case GL_UNSIGNED_INT: opcode = X_GLrop_VertexAttrib4NuivARB; break;
1507 default:
1508 __glXSetError(gc, GL_INVALID_ENUM);
1509 return;
1510 }
1511
1512 true_immediate_count = 4;
1513 }
1514 else {
1515 true_immediate_count = size;
1516
1517 switch( type ) {
1518 case GL_BYTE:
1519 opcode = X_GLrop_VertexAttrib4bvARB;
1520 true_immediate_count = 4;
1521 break;
1522 case GL_UNSIGNED_BYTE:
1523 opcode = X_GLrop_VertexAttrib4ubvARB;
1524 true_immediate_count = 4;
1525 break;
1526 case GL_SHORT:
1527 opcode = short_ops[size];
1528 break;
1529 case GL_UNSIGNED_SHORT:
1530 opcode = X_GLrop_VertexAttrib4usvARB;
1531 true_immediate_count = 4;
1532 break;
1533 case GL_INT:
1534 opcode = X_GLrop_VertexAttrib4ivARB;
1535 true_immediate_count = 4;
1536 break;
1537 case GL_UNSIGNED_INT:
1538 opcode = X_GLrop_VertexAttrib4uivARB;
1539 true_immediate_count = 4;
1540 break;
1541 case GL_FLOAT:
1542 opcode = float_ops[size];
1543 break;
1544 case GL_DOUBLE:
1545 opcode = double_ops[size];
1546 break;
1547 default:
1548 __glXSetError(gc, GL_INVALID_ENUM);
1549 return;
1550 }
1551 }
1552
1553 a = get_array_entry( arrays, GL_VERTEX_ATTRIB_ARRAY_POINTER, index );
1554 if ( a == NULL ) {
1555 __glXSetError(gc, GL_INVALID_OPERATION);
1556 return;
1557 }
1558
1559 COMMON_ARRAY_DATA_INIT( a, pointer, type, stride, size, normalized, 8,
1560 opcode );
1561
1562 true_immediate_size = __glXTypeSize(type) * true_immediate_count;
1563 ((uint16_t *) (a)->header)[0] = __GLX_PAD(a->header_size
1564 + true_immediate_size);
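   /* The immediate-mode command can carry more components than the array
    * stores (e.g. a two-component, normalized GL_UNSIGNED_SHORT attribute is
    * sent with the 4Nus command), so the render-command size in header[0] is
    * recomputed from true_immediate_count instead of element_size.
    */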
1565
1566 if ( a->enabled ) {
1567 arrays->array_info_cache_valid = GL_FALSE;
1568 }
1569 }
1570
1571
1572 /**
1573 * I don't have 100% confidence that this is correct. The different rules
1574 * about whether or not generic vertex attributes alias "classic" vertex
1575 * attributes (i.e., attrib1 ?= primary color) between ARB_vertex_program,
1576 * ARB_vertex_shader, and NV_vertex_program are a bit confusing. My
1577 * feeling is that the client-side doesn't have to worry about it. The
1578 * client just sends all the data to the server and lets the server deal
1579 * with it.
1580 */
1581 void __indirect_glVertexAttribPointerNV( GLuint index, GLint size,
1582 GLenum type, GLsizei stride,
1583 const GLvoid * pointer)
1584 {
1585 __GLXcontext *gc = __glXGetCurrentContext();
1586 GLboolean normalized = GL_FALSE;
1587
1588
1589 switch( type ) {
1590 case GL_UNSIGNED_BYTE:
1591 if ( size != 4 ) {
1592 __glXSetError(gc, GL_INVALID_VALUE);
1593 return;
1594 }
1595 normalized = GL_TRUE;
1596
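             /* FALLTHROUGH -- these types share the ARB entry point below. */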
1597 case GL_SHORT:
1598 case GL_FLOAT:
1599 case GL_DOUBLE:
1600 __indirect_glVertexAttribPointerARB(index, size, type,
1601 normalized,
1602 stride, pointer);
1603 return;
1604 default:
1605 __glXSetError(gc, GL_INVALID_ENUM);
1606 return;
1607 }
1608 }
1609
1610
1611 void __indirect_glClientActiveTextureARB(GLenum texture)
1612 {
1613 __GLXcontext * const gc = __glXGetCurrentContext();
1614 __GLXattribute * const state = (__GLXattribute *)(gc->client_state_private);
1615 struct array_state_vector * const arrays = state->array_state;
1616 const GLint unit = (GLint) texture - GL_TEXTURE0;
1617
1618
1619 if ( (unit < 0) || (unit >= arrays->num_texture_units) ) {
1620 __glXSetError(gc, GL_INVALID_ENUM);
1621 return;
1622 }
1623
1624 arrays->active_texture_unit = unit;
1625 }
1626
1627
1628 /**
1629 * Modify the enable state for the selected array
1630 */
1631 GLboolean
1632 __glXSetArrayEnable(__GLXattribute *state, GLenum key, unsigned index,
1633 GLboolean enable)
1634 {
1635 struct array_state_vector * arrays = state->array_state;
1636 struct array_state * a;
1637
1638
1639    /* Texture coordinate arrays have an implicit index set when the
1640 * application calls glClientActiveTexture.
1641 */
1642 if (key == GL_TEXTURE_COORD_ARRAY) {
1643 index = arrays->active_texture_unit;
1644 }
1645
1646 a = get_array_entry( arrays, key, index );
1647
1648 if ( (a != NULL) && (a->enabled != enable) ) {
1649 a->enabled = enable;
1650 arrays->array_info_cache_valid = GL_FALSE;
1651 }
1652
1653 return (a != NULL);
1654 }
1655
1656
1657 void
1658 __glXArrayDisableAll( __GLXattribute * state )
1659 {
1660 struct array_state_vector * arrays = state->array_state;
1661 unsigned i;
1662
1663
1664 for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
1665 arrays->arrays[i].enabled = GL_FALSE;
1666 }
1667
1668 arrays->array_info_cache_valid = GL_FALSE;
1669 }
1670
1671
1672 /**
1673  * Get the enable state of the specified array.
 */
1674 GLboolean
1675 __glXGetArrayEnable( const __GLXattribute * const state,
1676 GLenum key, unsigned index, GLintptr * dest )
1677 {
1678 const struct array_state_vector * arrays = state->array_state;
1679 const struct array_state * a = get_array_entry( (struct array_state_vector *) arrays,
1680 key, index );
1681
1682 if ( a != NULL ) {
1683 *dest = (GLintptr) a->enabled;
1684 }
1685
1686 return (a != NULL);
1687 }
1688
1689
1690 /**
1691  * Get the data type of the specified array.
 */
1692 GLboolean
1693 __glXGetArrayType( const __GLXattribute * const state,
1694 GLenum key, unsigned index, GLintptr * dest )
1695 {
1696 const struct array_state_vector * arrays = state->array_state;
1697 const struct array_state * a = get_array_entry( (struct array_state_vector *) arrays,
1698 key, index );
1699
1700 if ( a != NULL ) {
1701 *dest = (GLintptr) a->data_type;
1702 }
1703
1704 return (a != NULL);
1705 }
1706
1707
1708 /**
1709  * Get the component count (size) of the specified array.
 */
1710 GLboolean
1711 __glXGetArraySize( const __GLXattribute * const state,
1712 GLenum key, unsigned index, GLintptr * dest )
1713 {
1714 const struct array_state_vector * arrays = state->array_state;
1715 const struct array_state * a = get_array_entry( (struct array_state_vector *) arrays,
1716 key, index );
1717
1718 if ( a != NULL ) {
1719 *dest = (GLintptr) a->count;
1720 }
1721
1722 return (a != NULL);
1723 }
1724
1725
1726 /**
1727  * Get the application-specified stride of the specified array.
 */
1728 GLboolean
1729 __glXGetArrayStride( const __GLXattribute * const state,
1730 GLenum key, unsigned index, GLintptr * dest )
1731 {
1732 const struct array_state_vector * arrays = state->array_state;
1733 const struct array_state * a = get_array_entry( (struct array_state_vector *) arrays,
1734 key, index );
1735
1736 if ( a != NULL ) {
1737 *dest = (GLintptr) a->user_stride;
1738 }
1739
1740 return (a != NULL);
1741 }
1742
1743
1744 /**
1745  * Get the data pointer of the specified array.
 */
1746 GLboolean
1747 __glXGetArrayPointer( const __GLXattribute * const state,
1748 GLenum key, unsigned index, void ** dest )
1749 {
1750 const struct array_state_vector * arrays = state->array_state;
1751 const struct array_state * a = get_array_entry( (struct array_state_vector *) arrays,
1752 key, index );
1753
1754
1755 if ( a != NULL ) {
1756 *dest = (void *) (a->data);
1757 }
1758
1759 return (a != NULL);
1760 }
1761
1762
1763 /**
1764  * Get the normalized flag of the specified array.
 */
1765 GLboolean
1766 __glXGetArrayNormalized( const __GLXattribute * const state,
1767 GLenum key, unsigned index, GLintptr * dest )
1768 {
1769 const struct array_state_vector * arrays = state->array_state;
1770 const struct array_state * a = get_array_entry( (struct array_state_vector *) arrays,
1771 key, index );
1772
1773
1774 if ( a != NULL ) {
1775 *dest = (GLintptr) a->normalized;
1776 }
1777
1778 return (a != NULL);
1779 }
1780
1781
1782 /**
1783  * Get the client-side active texture unit.
 */
1784 GLuint
1785 __glXGetActiveTextureUnit( const __GLXattribute * const state )
1786 {
1787 return state->array_state->active_texture_unit;
1788 }
1789
1790
1791 void
1792 __glXPushArrayState( __GLXattribute * state )
1793 {
1794 struct array_state_vector * arrays = state->array_state;
1795 struct array_stack_state * stack = & arrays->stack[ (arrays->stack_index * arrays->num_arrays)];
1796 unsigned i;
1797
1798 /* XXX are we pushing _all_ the necessary fields? */
1799 for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
1800 stack[i].data = arrays->arrays[i].data;
1801 stack[i].data_type = arrays->arrays[i].data_type;
1802 stack[i].user_stride = arrays->arrays[i].user_stride;
1803 stack[i].count = arrays->arrays[i].count;
1804 stack[i].key = arrays->arrays[i].key;
1805 stack[i].index = arrays->arrays[i].index;
1806 stack[i].enabled = arrays->arrays[i].enabled;
1807 }
1808
1809 arrays->active_texture_unit_stack[ arrays->stack_index ] =
1810 arrays->active_texture_unit;
1811
1812 arrays->stack_index++;
1813 }
1814
1815
1816 void
1817 __glXPopArrayState( __GLXattribute * state )
1818 {
1819 struct array_state_vector * arrays = state->array_state;
1820 struct array_stack_state * stack;
1821 unsigned i;
1822
1823
1824 arrays->stack_index--;
1825 stack = & arrays->stack[ (arrays->stack_index * arrays->num_arrays) ];
1826
1827 for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
1828 switch ( stack[i].key ) {
1829 case GL_NORMAL_ARRAY:
1830 __indirect_glNormalPointer( stack[i].data_type,
1831 stack[i].user_stride,
1832 stack[i].data );
1833 break;
1834 case GL_COLOR_ARRAY:
1835 __indirect_glColorPointer( stack[i].count,
1836 stack[i].data_type,
1837 stack[i].user_stride,
1838 stack[i].data );
1839 break;
1840 case GL_INDEX_ARRAY:
1841 __indirect_glIndexPointer( stack[i].data_type,
1842 stack[i].user_stride,
1843 stack[i].data );
1844 break;
1845 case GL_EDGE_FLAG_ARRAY:
1846 __indirect_glEdgeFlagPointer( stack[i].user_stride,
1847 stack[i].data );
1848 break;
1849 case GL_TEXTURE_COORD_ARRAY:
1850 arrays->active_texture_unit = stack[i].index;
1851 __indirect_glTexCoordPointer( stack[i].count,
1852 stack[i].data_type,
1853 stack[i].user_stride,
1854 stack[i].data );
1855 break;
1856 case GL_SECONDARY_COLOR_ARRAY:
1857 __indirect_glSecondaryColorPointerEXT( stack[i].count,
1858 stack[i].data_type,
1859 stack[i].user_stride,
1860 stack[i].data );
1861 break;
1862 case GL_FOG_COORDINATE_ARRAY:
1863 __indirect_glFogCoordPointerEXT( stack[i].data_type,
1864 stack[i].user_stride,
1865 stack[i].data );
1866 break;
1867
1868 }
1869
1870 __glXSetArrayEnable( state, stack[i].key, stack[i].index,
1871 stack[i].enabled );
1872 }
1873
1874 arrays->active_texture_unit =
1875 arrays->active_texture_unit_stack[ arrays->stack_index ];
1876 }