2 * (C) Copyright IBM Corporation 2004, 2005
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
22 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 #include "glxclient.h"
32 #include <GL/glxproto.h>
33 #include "glxextensions.h"
34 #include "indirect_vertex_array.h"
35 #include "indirect_vertex_array_priv.h"
/* Round a byte count up to the next multiple of 4, as required by GLX
 * protocol padding rules.
 */
#define __GLX_PAD(n) (((n)+3) & ~3)
40 * \file indirect_vertex_array.c
41 * Implement GLX protocol for vertex arrays and vertex buffer objects.
43 * The most important function in this file is \c fill_array_info_cache.
44 * The \c array_state_vector contains a cache of the ARRAY_INFO data sent
45 * in the DrawArrays protocol. Certain operations, such as enabling or
46 * disabling an array, can invalidate this cache. \c fill_array_info_cache
47 * fills-in this data. Additionally, it examines the enabled state and
48 * other factors to determine what "version" of DrawArrays protocol can be
51 * Currently, only two versions of DrawArrays protocol are implemented. The
52 * first version is the "none" protocol. This is the fallback when the
53 * server does not support GL 1.1 / EXT_vertex_arrays. It is implemented
54 * by sending batches of immediate mode commands that are equivalent to the
55 * DrawArrays protocol.
57 * The other protocol that is currently implemented is the "old" protocol.
58 * This is the GL 1.1 DrawArrays protocol. The only difference between GL
59 * 1.1 and EXT_vertex_arrays is the opcode used for the DrawArrays command.
60 * This protocol is called "old" because the ARB is in the process of
61 * defining a new protocol, which will probably be called either "new" or
62 * "vbo", to support multiple texture coordinate arrays, generic attributes,
63 * and vertex buffer objects.
65 * \author Ian Romanick <ian.d.romanick@intel.com>
68 static void emit_DrawArrays_none(GLenum mode
, GLint first
, GLsizei count
);
69 static void emit_DrawArrays_old(GLenum mode
, GLint first
, GLsizei count
);
71 static void emit_DrawElements_none(GLenum mode
, GLsizei count
, GLenum type
,
72 const GLvoid
* indices
);
73 static void emit_DrawElements_old(GLenum mode
, GLsizei count
, GLenum type
,
74 const GLvoid
* indices
);
77 static GLubyte
*emit_element_none(GLubyte
* dst
,
78 const struct array_state_vector
*arrays
,
80 static GLubyte
*emit_element_old(GLubyte
* dst
,
81 const struct array_state_vector
*arrays
,
83 static struct array_state
*get_array_entry(const struct array_state_vector
86 static void fill_array_info_cache(struct array_state_vector
*arrays
);
87 static GLboolean
validate_mode(struct glx_context
* gc
, GLenum mode
);
88 static GLboolean
validate_count(struct glx_context
* gc
, GLsizei count
);
89 static GLboolean
validate_type(struct glx_context
* gc
, GLenum type
);
93 * Table of sizes, in bytes, of a GL types. All of the type enums are be in
94 * the range 0x1400 - 0x140F. That includes types added by extensions (i.e.,
95 * \c GL_HALF_FLOAT_NV). The elements of this table correspond to the
96 * type enums masked with 0x0f.
99 * \c GL_HALF_FLOAT_NV is not included. Neither are \c GL_2_BYTES,
100 * \c GL_3_BYTES, or \c GL_4_BYTES.
102 const GLuint __glXTypeSize_table
[16] = {
103 1, 1, 2, 2, 4, 4, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0
108 * Free the per-context array state that was allocated with
109 * __glXInitVertexArrayState().
112 __glXFreeVertexArrayState(struct glx_context
* gc
)
114 __GLXattribute
*state
= (__GLXattribute
*) (gc
->client_state_private
);
115 struct array_state_vector
*arrays
= state
->array_state
;
119 arrays
->stack
= NULL
;
120 free(arrays
->arrays
);
121 arrays
->arrays
= NULL
;
123 state
->array_state
= NULL
;
129 * Initialize vertex array state of a GLX context.
131 * \param gc GLX context whose vertex array state is to be initialized.
134 * This function may only be called after struct glx_context::gl_extension_bits,
135 * struct glx_context::server_minor, and __GLXcontext::server_major have been
136 * initialized. These values are used to determine what vertex arrays are
140 __glXInitVertexArrayState(struct glx_context
* gc
)
142 __GLXattribute
*state
= (__GLXattribute
*) (gc
->client_state_private
);
143 struct array_state_vector
*arrays
;
145 unsigned array_count
;
146 int texture_units
= 1, vertex_program_attribs
= 0;
149 GLboolean got_fog
= GL_FALSE
;
150 GLboolean got_secondary_color
= GL_FALSE
;
153 arrays
= calloc(1, sizeof(struct array_state_vector
));
155 if (arrays
== NULL
) {
156 __glXSetError(gc
, GL_OUT_OF_MEMORY
);
160 arrays
->old_DrawArrays_possible
= !state
->NoDrawArraysProtocol
;
161 arrays
->new_DrawArrays_possible
= GL_FALSE
;
162 arrays
->DrawArrays
= NULL
;
164 arrays
->active_texture_unit
= 0;
167 /* Determine how many arrays are actually needed. Only arrays that
168 * are supported by the server are create. For example, if the server
169 * supports only 2 texture units, then only 2 texture coordinate arrays
172 * At the very least, GL_VERTEX_ARRAY, GL_NORMAL_ARRAY,
173 * GL_COLOR_ARRAY, GL_INDEX_ARRAY, GL_TEXTURE_COORD_ARRAY, and
174 * GL_EDGE_FLAG_ARRAY are supported.
179 if (__glExtensionBitIsEnabled(gc
, GL_EXT_fog_coord_bit
)
180 || (gc
->server_major
> 1) || (gc
->server_minor
>= 4)) {
185 if (__glExtensionBitIsEnabled(gc
, GL_EXT_secondary_color_bit
)
186 || (gc
->server_major
> 1) || (gc
->server_minor
>= 4)) {
187 got_secondary_color
= GL_TRUE
;
191 if (__glExtensionBitIsEnabled(gc
, GL_ARB_multitexture_bit
)
192 || (gc
->server_major
> 1) || (gc
->server_minor
>= 3)) {
193 __indirect_glGetIntegerv(GL_MAX_TEXTURE_UNITS
, &texture_units
);
196 if (__glExtensionBitIsEnabled(gc
, GL_ARB_vertex_program_bit
)) {
197 __indirect_glGetProgramivARB(GL_VERTEX_PROGRAM_ARB
,
198 GL_MAX_PROGRAM_ATTRIBS_ARB
,
199 &vertex_program_attribs
);
202 arrays
->num_texture_units
= texture_units
;
203 arrays
->num_vertex_program_attribs
= vertex_program_attribs
;
204 array_count
+= texture_units
+ vertex_program_attribs
;
205 arrays
->num_arrays
= array_count
;
206 arrays
->arrays
= calloc(array_count
, sizeof(struct array_state
));
208 if (arrays
->arrays
== NULL
) {
210 __glXSetError(gc
, GL_OUT_OF_MEMORY
);
214 arrays
->arrays
[0].data_type
= GL_FLOAT
;
215 arrays
->arrays
[0].count
= 3;
216 arrays
->arrays
[0].key
= GL_NORMAL_ARRAY
;
217 arrays
->arrays
[0].normalized
= GL_TRUE
;
218 arrays
->arrays
[0].old_DrawArrays_possible
= GL_TRUE
;
220 arrays
->arrays
[1].data_type
= GL_FLOAT
;
221 arrays
->arrays
[1].count
= 4;
222 arrays
->arrays
[1].key
= GL_COLOR_ARRAY
;
223 arrays
->arrays
[1].normalized
= GL_TRUE
;
224 arrays
->arrays
[1].old_DrawArrays_possible
= GL_TRUE
;
226 arrays
->arrays
[2].data_type
= GL_FLOAT
;
227 arrays
->arrays
[2].count
= 1;
228 arrays
->arrays
[2].key
= GL_INDEX_ARRAY
;
229 arrays
->arrays
[2].old_DrawArrays_possible
= GL_TRUE
;
231 arrays
->arrays
[3].data_type
= GL_UNSIGNED_BYTE
;
232 arrays
->arrays
[3].count
= 1;
233 arrays
->arrays
[3].key
= GL_EDGE_FLAG_ARRAY
;
234 arrays
->arrays
[3].old_DrawArrays_possible
= GL_TRUE
;
236 for (i
= 0; i
< texture_units
; i
++) {
237 arrays
->arrays
[4 + i
].data_type
= GL_FLOAT
;
238 arrays
->arrays
[4 + i
].count
= 4;
239 arrays
->arrays
[4 + i
].key
= GL_TEXTURE_COORD_ARRAY
;
241 arrays
->arrays
[4 + i
].old_DrawArrays_possible
= (i
== 0);
242 arrays
->arrays
[4 + i
].index
= i
;
245 i
= 4 + texture_units
;
248 arrays
->arrays
[i
].data_type
= GL_FLOAT
;
249 arrays
->arrays
[i
].count
= 1;
250 arrays
->arrays
[i
].key
= GL_FOG_COORDINATE_ARRAY
;
251 arrays
->arrays
[i
].old_DrawArrays_possible
= GL_TRUE
;
255 if (got_secondary_color
) {
256 arrays
->arrays
[i
].data_type
= GL_FLOAT
;
257 arrays
->arrays
[i
].count
= 3;
258 arrays
->arrays
[i
].key
= GL_SECONDARY_COLOR_ARRAY
;
259 arrays
->arrays
[i
].old_DrawArrays_possible
= GL_TRUE
;
260 arrays
->arrays
[i
].normalized
= GL_TRUE
;
265 for (j
= 0; j
< vertex_program_attribs
; j
++) {
266 const unsigned idx
= (vertex_program_attribs
- (j
+ 1));
269 arrays
->arrays
[idx
+ i
].data_type
= GL_FLOAT
;
270 arrays
->arrays
[idx
+ i
].count
= 4;
271 arrays
->arrays
[idx
+ i
].key
= GL_VERTEX_ATTRIB_ARRAY_POINTER
;
273 arrays
->arrays
[idx
+ i
].old_DrawArrays_possible
= 0;
274 arrays
->arrays
[idx
+ i
].index
= idx
;
277 i
+= vertex_program_attribs
;
280 /* Vertex array *must* be last because of the way that
281 * emit_DrawArrays_none works.
284 arrays
->arrays
[i
].data_type
= GL_FLOAT
;
285 arrays
->arrays
[i
].count
= 4;
286 arrays
->arrays
[i
].key
= GL_VERTEX_ARRAY
;
287 arrays
->arrays
[i
].old_DrawArrays_possible
= GL_TRUE
;
289 assert((i
+ 1) == arrays
->num_arrays
);
291 arrays
->stack_index
= 0;
292 arrays
->stack
= malloc(sizeof(struct array_stack_state
)
294 * __GL_CLIENT_ATTRIB_STACK_DEPTH
);
296 if (arrays
->stack
== NULL
) {
297 free(arrays
->arrays
);
299 __glXSetError(gc
, GL_OUT_OF_MEMORY
);
303 /* Everything went ok so we put vertex array state in place
306 state
->array_state
= arrays
;
311 * Calculate the size of a single vertex for the "none" protocol. This is
312 * essentially the size of all the immediate-mode commands required to
313 * implement the enabled vertex arrays.
316 calculate_single_vertex_size_none(const struct array_state_vector
*arrays
)
318 size_t single_vertex_size
= 0;
322 for (i
= 0; i
< arrays
->num_arrays
; i
++) {
323 if (arrays
->arrays
[i
].enabled
) {
324 single_vertex_size
+= arrays
->arrays
[i
].header
[0];
328 return single_vertex_size
;
333 * Emit a single element using non-DrawArrays protocol.
336 emit_element_none(GLubyte
* dst
,
337 const struct array_state_vector
* arrays
, unsigned index
)
342 for (i
= 0; i
< arrays
->num_arrays
; i
++) {
343 if (arrays
->arrays
[i
].enabled
) {
344 const size_t offset
= index
* arrays
->arrays
[i
].true_stride
;
346 /* The generic attributes can have more data than is in the
347 * elements. This is because a vertex array can be a 2 element,
348 * normalized, unsigned short, but the "closest" immediate mode
349 * protocol is for a 4Nus. Since the sizes are small, the
350 * performance impact on modern processors should be negligible.
352 (void) memset(dst
, 0, arrays
->arrays
[i
].header
[0]);
354 (void) memcpy(dst
, arrays
->arrays
[i
].header
, 4);
358 if (arrays
->arrays
[i
].key
== GL_TEXTURE_COORD_ARRAY
&&
359 arrays
->arrays
[i
].index
> 0) {
360 /* Multi-texture coordinate arrays require the texture target
361 * to be sent. For doubles it is after the data, for everything
364 GLenum texture
= arrays
->arrays
[i
].index
+ GL_TEXTURE0
;
365 if (arrays
->arrays
[i
].data_type
== GL_DOUBLE
) {
366 (void) memcpy(dst
, ((GLubyte
*) arrays
->arrays
[i
].data
) + offset
,
367 arrays
->arrays
[i
].element_size
);
368 dst
+= arrays
->arrays
[i
].element_size
;
369 (void) memcpy(dst
, &texture
, 4);
372 (void) memcpy(dst
, &texture
, 4);
374 (void) memcpy(dst
, ((GLubyte
*) arrays
->arrays
[i
].data
) + offset
,
375 arrays
->arrays
[i
].element_size
);
376 dst
+= __GLX_PAD(arrays
->arrays
[i
].element_size
);
378 } else if (arrays
->arrays
[i
].key
== GL_VERTEX_ATTRIB_ARRAY_POINTER
) {
379 /* Vertex attribute data requires the index sent first.
381 (void) memcpy(dst
, &arrays
->arrays
[i
].index
, 4);
383 (void) memcpy(dst
, ((GLubyte
*) arrays
->arrays
[i
].data
) + offset
,
384 arrays
->arrays
[i
].element_size
);
385 dst
+= __GLX_PAD(arrays
->arrays
[i
].element_size
);
387 (void) memcpy(dst
, ((GLubyte
*) arrays
->arrays
[i
].data
) + offset
,
388 arrays
->arrays
[i
].element_size
);
389 dst
+= __GLX_PAD(arrays
->arrays
[i
].element_size
);
399 * Emit a single element using "old" DrawArrays protocol from
400 * EXT_vertex_arrays / OpenGL 1.1.
403 emit_element_old(GLubyte
* dst
,
404 const struct array_state_vector
* arrays
, unsigned index
)
409 for (i
= 0; i
< arrays
->num_arrays
; i
++) {
410 if (arrays
->arrays
[i
].enabled
) {
411 const size_t offset
= index
* arrays
->arrays
[i
].true_stride
;
413 (void) memcpy(dst
, ((GLubyte
*) arrays
->arrays
[i
].data
) + offset
,
414 arrays
->arrays
[i
].element_size
);
416 dst
+= __GLX_PAD(arrays
->arrays
[i
].element_size
);
425 get_array_entry(const struct array_state_vector
*arrays
,
426 GLenum key
, unsigned index
)
430 for (i
= 0; i
< arrays
->num_arrays
; i
++) {
431 if ((arrays
->arrays
[i
].key
== key
)
432 && (arrays
->arrays
[i
].index
== index
)) {
433 return &arrays
->arrays
[i
];
442 allocate_array_info_cache(struct array_state_vector
*arrays
,
443 size_t required_size
)
445 #define MAX_HEADER_SIZE 20
446 if (arrays
->array_info_cache_buffer_size
< required_size
) {
447 GLubyte
*temp
= realloc(arrays
->array_info_cache_base
,
448 required_size
+ MAX_HEADER_SIZE
);
454 arrays
->array_info_cache_base
= temp
;
455 arrays
->array_info_cache
= temp
+ MAX_HEADER_SIZE
;
456 arrays
->array_info_cache_buffer_size
= required_size
;
459 arrays
->array_info_cache_size
= required_size
;
467 fill_array_info_cache(struct array_state_vector
*arrays
)
469 GLboolean old_DrawArrays_possible
;
473 /* Determine how many arrays are enabled.
476 arrays
->enabled_client_array_count
= 0;
477 old_DrawArrays_possible
= arrays
->old_DrawArrays_possible
;
478 for (i
= 0; i
< arrays
->num_arrays
; i
++) {
479 if (arrays
->arrays
[i
].enabled
) {
480 arrays
->enabled_client_array_count
++;
481 old_DrawArrays_possible
&= arrays
->arrays
[i
].old_DrawArrays_possible
;
485 if (arrays
->new_DrawArrays_possible
) {
486 assert(!arrays
->new_DrawArrays_possible
);
488 else if (old_DrawArrays_possible
) {
489 const size_t required_size
= arrays
->enabled_client_array_count
* 12;
493 if (!allocate_array_info_cache(arrays
, required_size
)) {
498 info
= (uint32_t *) arrays
->array_info_cache
;
499 for (i
= 0; i
< arrays
->num_arrays
; i
++) {
500 if (arrays
->arrays
[i
].enabled
) {
501 *(info
++) = arrays
->arrays
[i
].data_type
;
502 *(info
++) = arrays
->arrays
[i
].count
;
503 *(info
++) = arrays
->arrays
[i
].key
;
507 arrays
->DrawArrays
= emit_DrawArrays_old
;
508 arrays
->DrawElements
= emit_DrawElements_old
;
511 arrays
->DrawArrays
= emit_DrawArrays_none
;
512 arrays
->DrawElements
= emit_DrawElements_none
;
515 arrays
->array_info_cache_valid
= GL_TRUE
;
520 * Emit a \c glDrawArrays command using the "none" protocol. That is,
521 * emit immediate-mode commands that are equivalent to the requested
522 * \c glDrawArrays command. This is used with servers that don't support
523 * the OpenGL 1.1 / EXT_vertex_arrays DrawArrays protocol or in cases where
524 * vertex state is enabled that is not compatible with that protocol.
527 emit_DrawArrays_none(GLenum mode
, GLint first
, GLsizei count
)
529 struct glx_context
*gc
= __glXGetCurrentContext();
530 const __GLXattribute
*state
=
531 (const __GLXattribute
*) (gc
->client_state_private
);
532 struct array_state_vector
*arrays
= state
->array_state
;
534 size_t single_vertex_size
;
537 static const uint16_t begin_cmd
[2] = { 8, X_GLrop_Begin
};
538 static const uint16_t end_cmd
[2] = { 4, X_GLrop_End
};
541 single_vertex_size
= calculate_single_vertex_size_none(arrays
);
545 (void) memcpy(pc
, begin_cmd
, 4);
546 *(int *) (pc
+ 4) = mode
;
550 for (i
= 0; i
< count
; i
++) {
551 if ((pc
+ single_vertex_size
) >= gc
->bufEnd
) {
552 pc
= __glXFlushRenderBuffer(gc
, pc
);
555 pc
= emit_element_none(pc
, arrays
, first
+ i
);
558 if ((pc
+ 4) >= gc
->bufEnd
) {
559 pc
= __glXFlushRenderBuffer(gc
, pc
);
562 (void) memcpy(pc
, end_cmd
, 4);
566 if (gc
->pc
> gc
->limit
) {
567 (void) __glXFlushRenderBuffer(gc
, gc
->pc
);
573 * Emit the header data for the GL 1.1 / EXT_vertex_arrays DrawArrays
576 * \param gc GLX context.
577 * \param arrays Array state.
578 * \param elements_per_request Location to store the number of elements that
579 * can fit in a single Render / RenderLarge
581 * \param total_request Total number of requests for a RenderLarge
582 * command. If a Render command is used, this
584 * \param mode Drawing mode.
585 * \param count Number of vertices.
588 * A pointer to the buffer for array data.
591 emit_DrawArrays_header_old(struct glx_context
* gc
,
592 struct array_state_vector
*arrays
,
593 size_t * elements_per_request
,
594 unsigned int *total_requests
,
595 GLenum mode
, GLsizei count
)
598 size_t single_vertex_size
;
599 const unsigned header_size
= 16;
604 /* Determine the size of the whole command. This includes the header,
605 * the ARRAY_INFO data and the array data. Once this size is calculated,
606 * it will be known whether a Render or RenderLarge command is needed.
609 single_vertex_size
= 0;
610 for (i
= 0; i
< arrays
->num_arrays
; i
++) {
611 if (arrays
->arrays
[i
].enabled
) {
612 single_vertex_size
+= __GLX_PAD(arrays
->arrays
[i
].element_size
);
616 command_size
= arrays
->array_info_cache_size
+ header_size
617 + (single_vertex_size
* count
);
620 /* Write the header for either a Render command or a RenderLarge
621 * command. After the header is written, write the ARRAY_INFO data.
624 if (command_size
> gc
->maxSmallRenderCommandSize
) {
625 /* maxSize is the maximum amount of data can be stuffed into a single
626 * packet. sz_xGLXRenderReq is added because bufSize is the maximum
627 * packet size minus sz_xGLXRenderReq.
629 const size_t maxSize
= (gc
->bufSize
+ sz_xGLXRenderReq
)
630 - sz_xGLXRenderLargeReq
;
631 unsigned vertex_requests
;
634 /* Calculate the number of data packets that will be required to send
635 * the whole command. To do this, the number of verticies that
636 * will fit in a single buffer must be calculated.
638 * The important value here is elements_per_request. This is the
639 * number of complete array elements that will fit in a single
640 * buffer. There may be some wasted space at the end of the buffer,
641 * but splitting elements across buffer boundries would be painful.
644 elements_per_request
[0] = maxSize
/ single_vertex_size
;
646 vertex_requests
= (count
+ elements_per_request
[0] - 1)
647 / elements_per_request
[0];
649 *total_requests
= vertex_requests
+ 1;
652 __glXFlushRenderBuffer(gc
, gc
->pc
);
656 pc
= ((GLubyte
*) arrays
->array_info_cache
) - (header_size
+ 4);
657 *(uint32_t *) (pc
+ 0) = command_size
;
658 *(uint32_t *) (pc
+ 4) = X_GLrop_DrawArrays
;
659 *(uint32_t *) (pc
+ 8) = count
;
660 *(uint32_t *) (pc
+ 12) = arrays
->enabled_client_array_count
;
661 *(uint32_t *) (pc
+ 16) = mode
;
663 __glXSendLargeChunk(gc
, 1, *total_requests
, pc
,
664 header_size
+ 4 + arrays
->array_info_cache_size
);
669 if ((gc
->pc
+ command_size
) >= gc
->bufEnd
) {
670 (void) __glXFlushRenderBuffer(gc
, gc
->pc
);
674 *(uint16_t *) (pc
+ 0) = command_size
;
675 *(uint16_t *) (pc
+ 2) = X_GLrop_DrawArrays
;
676 *(uint32_t *) (pc
+ 4) = count
;
677 *(uint32_t *) (pc
+ 8) = arrays
->enabled_client_array_count
;
678 *(uint32_t *) (pc
+ 12) = mode
;
682 (void) memcpy(pc
, arrays
->array_info_cache
,
683 arrays
->array_info_cache_size
);
684 pc
+= arrays
->array_info_cache_size
;
686 *elements_per_request
= count
;
698 emit_DrawArrays_old(GLenum mode
, GLint first
, GLsizei count
)
700 struct glx_context
*gc
= __glXGetCurrentContext();
701 const __GLXattribute
*state
=
702 (const __GLXattribute
*) (gc
->client_state_private
);
703 struct array_state_vector
*arrays
= state
->array_state
;
706 size_t elements_per_request
;
707 unsigned total_requests
= 0;
709 size_t total_sent
= 0;
712 pc
= emit_DrawArrays_header_old(gc
, arrays
, &elements_per_request
,
713 &total_requests
, mode
, count
);
719 if (total_requests
== 0) {
720 assert(elements_per_request
>= count
);
722 for (i
= 0; i
< count
; i
++) {
723 pc
= emit_element_old(pc
, arrays
, i
+ first
);
726 assert(pc
<= gc
->bufEnd
);
729 if (gc
->pc
> gc
->limit
) {
730 (void) __glXFlushRenderBuffer(gc
, gc
->pc
);
737 for (req
= 2; req
<= total_requests
; req
++) {
738 if (count
< elements_per_request
) {
739 elements_per_request
= count
;
743 for (i
= 0; i
< elements_per_request
; i
++) {
744 pc
= emit_element_old(pc
, arrays
, i
+ first
);
747 first
+= elements_per_request
;
749 total_sent
+= (size_t) (pc
- gc
->pc
);
750 __glXSendLargeChunk(gc
, req
, total_requests
, gc
->pc
, pc
- gc
->pc
);
752 count
-= elements_per_request
;
759 emit_DrawElements_none(GLenum mode
, GLsizei count
, GLenum type
,
760 const GLvoid
* indices
)
762 struct glx_context
*gc
= __glXGetCurrentContext();
763 const __GLXattribute
*state
=
764 (const __GLXattribute
*) (gc
->client_state_private
);
765 struct array_state_vector
*arrays
= state
->array_state
;
766 static const uint16_t begin_cmd
[2] = { 8, X_GLrop_Begin
};
767 static const uint16_t end_cmd
[2] = { 4, X_GLrop_End
};
770 size_t single_vertex_size
;
774 single_vertex_size
= calculate_single_vertex_size_none(arrays
);
777 if ((gc
->pc
+ single_vertex_size
) >= gc
->bufEnd
) {
778 gc
->pc
= __glXFlushRenderBuffer(gc
, gc
->pc
);
783 (void) memcpy(pc
, begin_cmd
, 4);
784 *(int *) (pc
+ 4) = mode
;
788 for (i
= 0; i
< count
; i
++) {
791 if ((pc
+ single_vertex_size
) >= gc
->bufEnd
) {
792 pc
= __glXFlushRenderBuffer(gc
, pc
);
796 case GL_UNSIGNED_INT
:
797 index
= (unsigned) (((GLuint
*) indices
)[i
]);
799 case GL_UNSIGNED_SHORT
:
800 index
= (unsigned) (((GLushort
*) indices
)[i
]);
802 case GL_UNSIGNED_BYTE
:
803 index
= (unsigned) (((GLubyte
*) indices
)[i
]);
806 pc
= emit_element_none(pc
, arrays
, index
);
809 if ((pc
+ 4) >= gc
->bufEnd
) {
810 pc
= __glXFlushRenderBuffer(gc
, pc
);
813 (void) memcpy(pc
, end_cmd
, 4);
817 if (gc
->pc
> gc
->limit
) {
818 (void) __glXFlushRenderBuffer(gc
, gc
->pc
);
826 emit_DrawElements_old(GLenum mode
, GLsizei count
, GLenum type
,
827 const GLvoid
* indices
)
829 struct glx_context
*gc
= __glXGetCurrentContext();
830 const __GLXattribute
*state
=
831 (const __GLXattribute
*) (gc
->client_state_private
);
832 struct array_state_vector
*arrays
= state
->array_state
;
835 size_t elements_per_request
;
836 unsigned total_requests
= 0;
839 unsigned req_element
= 0;
842 pc
= emit_DrawArrays_header_old(gc
, arrays
, &elements_per_request
,
843 &total_requests
, mode
, count
);
851 if (count
< elements_per_request
) {
852 elements_per_request
= count
;
856 case GL_UNSIGNED_INT
:{
857 const GLuint
*ui_ptr
= (const GLuint
*) indices
+ req_element
;
859 for (i
= 0; i
< elements_per_request
; i
++) {
860 const GLint index
= (GLint
) * (ui_ptr
++);
861 pc
= emit_element_old(pc
, arrays
, index
);
865 case GL_UNSIGNED_SHORT
:{
866 const GLushort
*us_ptr
= (const GLushort
*) indices
+ req_element
;
868 for (i
= 0; i
< elements_per_request
; i
++) {
869 const GLint index
= (GLint
) * (us_ptr
++);
870 pc
= emit_element_old(pc
, arrays
, index
);
874 case GL_UNSIGNED_BYTE
:{
875 const GLubyte
*ub_ptr
= (const GLubyte
*) indices
+ req_element
;
877 for (i
= 0; i
< elements_per_request
; i
++) {
878 const GLint index
= (GLint
) * (ub_ptr
++);
879 pc
= emit_element_old(pc
, arrays
, index
);
885 if (total_requests
!= 0) {
886 __glXSendLargeChunk(gc
, req
, total_requests
, gc
->pc
, pc
- gc
->pc
);
891 count
-= elements_per_request
;
892 req_element
+= elements_per_request
;
896 assert((total_requests
== 0) || ((req
- 1) == total_requests
));
898 if (total_requests
== 0) {
899 assert(pc
<= gc
->bufEnd
);
902 if (gc
->pc
> gc
->limit
) {
903 (void) __glXFlushRenderBuffer(gc
, gc
->pc
);
910 * Validate that the \c mode parameter to \c glDrawArrays, et. al. is valid.
911 * If it is not valid, then an error code is set in the GLX context.
914 * \c GL_TRUE if the argument is valid, \c GL_FALSE if is not.
917 validate_mode(struct glx_context
* gc
, GLenum mode
)
924 case GL_TRIANGLE_STRIP
:
925 case GL_TRIANGLE_FAN
:
932 __glXSetError(gc
, GL_INVALID_ENUM
);
941 * Validate that the \c count parameter to \c glDrawArrays, et. al. is valid.
942 * A value less than zero is invalid and will result in \c GL_INVALID_VALUE
943 * being set. A value of zero will not result in an error being set, but
944 * will result in \c GL_FALSE being returned.
947 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
950 validate_count(struct glx_context
* gc
, GLsizei count
)
953 __glXSetError(gc
, GL_INVALID_VALUE
);
961 * Validate that the \c type parameter to \c glDrawElements, et. al. is
962 * valid. Only \c GL_UNSIGNED_BYTE, \c GL_UNSIGNED_SHORT, and
963 * \c GL_UNSIGNED_INT are valid.
966 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
969 validate_type(struct glx_context
* gc
, GLenum type
)
972 case GL_UNSIGNED_INT
:
973 case GL_UNSIGNED_SHORT
:
974 case GL_UNSIGNED_BYTE
:
977 __glXSetError(gc
, GL_INVALID_ENUM
);
984 __indirect_glDrawArrays(GLenum mode
, GLint first
, GLsizei count
)
986 struct glx_context
*gc
= __glXGetCurrentContext();
987 const __GLXattribute
*state
=
988 (const __GLXattribute
*) (gc
->client_state_private
);
989 struct array_state_vector
*arrays
= state
->array_state
;
992 if (validate_mode(gc
, mode
) && validate_count(gc
, count
)) {
993 if (!arrays
->array_info_cache_valid
) {
994 fill_array_info_cache(arrays
);
997 arrays
->DrawArrays(mode
, first
, count
);
1003 __indirect_glArrayElement(GLint index
)
1005 struct glx_context
*gc
= __glXGetCurrentContext();
1006 const __GLXattribute
*state
=
1007 (const __GLXattribute
*) (gc
->client_state_private
);
1008 struct array_state_vector
*arrays
= state
->array_state
;
1010 size_t single_vertex_size
;
1013 single_vertex_size
= calculate_single_vertex_size_none(arrays
);
1015 if ((gc
->pc
+ single_vertex_size
) >= gc
->bufEnd
) {
1016 gc
->pc
= __glXFlushRenderBuffer(gc
, gc
->pc
);
1019 gc
->pc
= emit_element_none(gc
->pc
, arrays
, index
);
1021 if (gc
->pc
> gc
->limit
) {
1022 (void) __glXFlushRenderBuffer(gc
, gc
->pc
);
1028 __indirect_glDrawElements(GLenum mode
, GLsizei count
, GLenum type
,
1029 const GLvoid
* indices
)
1031 struct glx_context
*gc
= __glXGetCurrentContext();
1032 const __GLXattribute
*state
=
1033 (const __GLXattribute
*) (gc
->client_state_private
);
1034 struct array_state_vector
*arrays
= state
->array_state
;
1037 if (validate_mode(gc
, mode
) && validate_count(gc
, count
)
1038 && validate_type(gc
, type
)) {
1039 if (!arrays
->array_info_cache_valid
) {
1040 fill_array_info_cache(arrays
);
1043 arrays
->DrawElements(mode
, count
, type
, indices
);
1049 __indirect_glDrawRangeElements(GLenum mode
, GLuint start
, GLuint end
,
1050 GLsizei count
, GLenum type
,
1051 const GLvoid
* indices
)
1053 struct glx_context
*gc
= __glXGetCurrentContext();
1054 const __GLXattribute
*state
=
1055 (const __GLXattribute
*) (gc
->client_state_private
);
1056 struct array_state_vector
*arrays
= state
->array_state
;
1059 if (validate_mode(gc
, mode
) && validate_count(gc
, count
)
1060 && validate_type(gc
, type
)) {
1062 __glXSetError(gc
, GL_INVALID_VALUE
);
1066 if (!arrays
->array_info_cache_valid
) {
1067 fill_array_info_cache(arrays
);
1070 arrays
->DrawElements(mode
, count
, type
, indices
);
1076 __indirect_glMultiDrawArrays(GLenum mode
, const GLint
*first
,
1077 const GLsizei
*count
, GLsizei primcount
)
1079 struct glx_context
*gc
= __glXGetCurrentContext();
1080 const __GLXattribute
*state
=
1081 (const __GLXattribute
*) (gc
->client_state_private
);
1082 struct array_state_vector
*arrays
= state
->array_state
;
1086 if (validate_mode(gc
, mode
)) {
1087 if (!arrays
->array_info_cache_valid
) {
1088 fill_array_info_cache(arrays
);
1091 for (i
= 0; i
< primcount
; i
++) {
1092 if (validate_count(gc
, count
[i
])) {
1093 arrays
->DrawArrays(mode
, first
[i
], count
[i
]);
1101 __indirect_glMultiDrawElementsEXT(GLenum mode
, const GLsizei
* count
,
1102 GLenum type
, const GLvoid
* const * indices
,
1105 struct glx_context
*gc
= __glXGetCurrentContext();
1106 const __GLXattribute
*state
=
1107 (const __GLXattribute
*) (gc
->client_state_private
);
1108 struct array_state_vector
*arrays
= state
->array_state
;
1112 if (validate_mode(gc
, mode
) && validate_type(gc
, type
)) {
1113 if (!arrays
->array_info_cache_valid
) {
1114 fill_array_info_cache(arrays
);
1117 for (i
= 0; i
< primcount
; i
++) {
1118 if (validate_count(gc
, count
[i
])) {
1119 arrays
->DrawElements(mode
, count
[i
], type
, indices
[i
]);
/* The HDR_SIZE macro argument is the command header size (4 bytes)
 * plus any additional index word e.g. for texture units or vertex
 * attributes.
 *
 * Wrapped in do { } while(0) so the expansion is a single statement; the
 * visible text was a bare statement sequence and also lost the
 * (a)->data assignment.
 */
#define COMMON_ARRAY_DATA_INIT(a, PTR, TYPE, STRIDE, COUNT, NORMALIZED, HDR_SIZE, OPCODE) \
   do {                                                            \
      (a)->data = PTR;                                             \
      (a)->data_type = TYPE;                                       \
      (a)->user_stride = STRIDE;                                   \
      (a)->count = COUNT;                                          \
      (a)->normalized = NORMALIZED;                                \
                                                                   \
      (a)->element_size = __glXTypeSize( TYPE ) * COUNT;           \
      (a)->true_stride = (STRIDE == 0)                             \
         ? (a)->element_size : STRIDE;                             \
                                                                   \
      (a)->header[0] = __GLX_PAD(HDR_SIZE + (a)->element_size);    \
      (a)->header[1] = OPCODE;                                     \
   } while (0)
1148 __indirect_glVertexPointer(GLint size
, GLenum type
, GLsizei stride
,
1149 const GLvoid
* pointer
)
1151 static const uint16_t short_ops
[5] = {
1152 0, 0, X_GLrop_Vertex2sv
, X_GLrop_Vertex3sv
, X_GLrop_Vertex4sv
1154 static const uint16_t int_ops
[5] = {
1155 0, 0, X_GLrop_Vertex2iv
, X_GLrop_Vertex3iv
, X_GLrop_Vertex4iv
1157 static const uint16_t float_ops
[5] = {
1158 0, 0, X_GLrop_Vertex2fv
, X_GLrop_Vertex3fv
, X_GLrop_Vertex4fv
1160 static const uint16_t double_ops
[5] = {
1161 0, 0, X_GLrop_Vertex2dv
, X_GLrop_Vertex3dv
, X_GLrop_Vertex4dv
1164 struct glx_context
*gc
= __glXGetCurrentContext();
1165 __GLXattribute
*state
= (__GLXattribute
*) (gc
->client_state_private
);
1166 struct array_state_vector
*arrays
= state
->array_state
;
1167 struct array_state
*a
;
1170 if (size
< 2 || size
> 4 || stride
< 0) {
1171 __glXSetError(gc
, GL_INVALID_VALUE
);
1177 opcode
= short_ops
[size
];
1180 opcode
= int_ops
[size
];
1183 opcode
= float_ops
[size
];
1186 opcode
= double_ops
[size
];
1189 __glXSetError(gc
, GL_INVALID_ENUM
);
1193 a
= get_array_entry(arrays
, GL_VERTEX_ARRAY
, 0);
1195 COMMON_ARRAY_DATA_INIT(a
, pointer
, type
, stride
, size
, GL_FALSE
, 4,
1199 arrays
->array_info_cache_valid
= GL_FALSE
;
1205 __indirect_glNormalPointer(GLenum type
, GLsizei stride
,
1206 const GLvoid
* pointer
)
1209 struct glx_context
*gc
= __glXGetCurrentContext();
1210 __GLXattribute
*state
= (__GLXattribute
*) (gc
->client_state_private
);
1211 struct array_state_vector
*arrays
= state
->array_state
;
1212 struct array_state
*a
;
1216 __glXSetError(gc
, GL_INVALID_VALUE
);
1222 opcode
= X_GLrop_Normal3bv
;
1225 opcode
= X_GLrop_Normal3sv
;
1228 opcode
= X_GLrop_Normal3iv
;
1231 opcode
= X_GLrop_Normal3fv
;
1234 opcode
= X_GLrop_Normal3dv
;
1237 __glXSetError(gc
, GL_INVALID_ENUM
);
1241 a
= get_array_entry(arrays
, GL_NORMAL_ARRAY
, 0);
1243 COMMON_ARRAY_DATA_INIT(a
, pointer
, type
, stride
, 3, GL_TRUE
, 4, opcode
);
1246 arrays
->array_info_cache_valid
= GL_FALSE
;
1252 __indirect_glColorPointer(GLint size
, GLenum type
, GLsizei stride
,
1253 const GLvoid
* pointer
)
1255 static const uint16_t byte_ops
[5] = {
1256 0, 0, 0, X_GLrop_Color3bv
, X_GLrop_Color4bv
1258 static const uint16_t ubyte_ops
[5] = {
1259 0, 0, 0, X_GLrop_Color3ubv
, X_GLrop_Color4ubv
1261 static const uint16_t short_ops
[5] = {
1262 0, 0, 0, X_GLrop_Color3sv
, X_GLrop_Color4sv
1264 static const uint16_t ushort_ops
[5] = {
1265 0, 0, 0, X_GLrop_Color3usv
, X_GLrop_Color4usv
1267 static const uint16_t int_ops
[5] = {
1268 0, 0, 0, X_GLrop_Color3iv
, X_GLrop_Color4iv
1270 static const uint16_t uint_ops
[5] = {
1271 0, 0, 0, X_GLrop_Color3uiv
, X_GLrop_Color4uiv
1273 static const uint16_t float_ops
[5] = {
1274 0, 0, 0, X_GLrop_Color3fv
, X_GLrop_Color4fv
1276 static const uint16_t double_ops
[5] = {
1277 0, 0, 0, X_GLrop_Color3dv
, X_GLrop_Color4dv
1280 struct glx_context
*gc
= __glXGetCurrentContext();
1281 __GLXattribute
*state
= (__GLXattribute
*) (gc
->client_state_private
);
1282 struct array_state_vector
*arrays
= state
->array_state
;
1283 struct array_state
*a
;
1286 if (size
< 3 || size
> 4 || stride
< 0) {
1287 __glXSetError(gc
, GL_INVALID_VALUE
);
1293 opcode
= byte_ops
[size
];
1295 case GL_UNSIGNED_BYTE
:
1296 opcode
= ubyte_ops
[size
];
1299 opcode
= short_ops
[size
];
1301 case GL_UNSIGNED_SHORT
:
1302 opcode
= ushort_ops
[size
];
1305 opcode
= int_ops
[size
];
1307 case GL_UNSIGNED_INT
:
1308 opcode
= uint_ops
[size
];
1311 opcode
= float_ops
[size
];
1314 opcode
= double_ops
[size
];
1317 __glXSetError(gc
, GL_INVALID_ENUM
);
1321 a
= get_array_entry(arrays
, GL_COLOR_ARRAY
, 0);
1323 COMMON_ARRAY_DATA_INIT(a
, pointer
, type
, stride
, size
, GL_TRUE
, 4, opcode
);
1326 arrays
->array_info_cache_valid
= GL_FALSE
;
1332 __indirect_glIndexPointer(GLenum type
, GLsizei stride
, const GLvoid
* pointer
)
1335 struct glx_context
*gc
= __glXGetCurrentContext();
1336 __GLXattribute
*state
= (__GLXattribute
*) (gc
->client_state_private
);
1337 struct array_state_vector
*arrays
= state
->array_state
;
1338 struct array_state
*a
;
1342 __glXSetError(gc
, GL_INVALID_VALUE
);
1347 case GL_UNSIGNED_BYTE
:
1348 opcode
= X_GLrop_Indexubv
;
1351 opcode
= X_GLrop_Indexsv
;
1354 opcode
= X_GLrop_Indexiv
;
1357 opcode
= X_GLrop_Indexfv
;
1360 opcode
= X_GLrop_Indexdv
;
1363 __glXSetError(gc
, GL_INVALID_ENUM
);
1367 a
= get_array_entry(arrays
, GL_INDEX_ARRAY
, 0);
1369 COMMON_ARRAY_DATA_INIT(a
, pointer
, type
, stride
, 1, GL_FALSE
, 4, opcode
);
1372 arrays
->array_info_cache_valid
= GL_FALSE
;
1378 __indirect_glEdgeFlagPointer(GLsizei stride
, const GLvoid
* pointer
)
1380 struct glx_context
*gc
= __glXGetCurrentContext();
1381 __GLXattribute
*state
= (__GLXattribute
*) (gc
->client_state_private
);
1382 struct array_state_vector
*arrays
= state
->array_state
;
1383 struct array_state
*a
;
1387 __glXSetError(gc
, GL_INVALID_VALUE
);
1392 a
= get_array_entry(arrays
, GL_EDGE_FLAG_ARRAY
, 0);
1394 COMMON_ARRAY_DATA_INIT(a
, pointer
, GL_UNSIGNED_BYTE
, stride
, 1, GL_FALSE
,
1395 4, X_GLrop_EdgeFlagv
);
1398 arrays
->array_info_cache_valid
= GL_FALSE
;
1404 __indirect_glTexCoordPointer(GLint size
, GLenum type
, GLsizei stride
,
1405 const GLvoid
* pointer
)
1407 static const uint16_t short_ops
[5] = {
1408 0, X_GLrop_TexCoord1sv
, X_GLrop_TexCoord2sv
, X_GLrop_TexCoord3sv
,
1411 static const uint16_t int_ops
[5] = {
1412 0, X_GLrop_TexCoord1iv
, X_GLrop_TexCoord2iv
, X_GLrop_TexCoord3iv
,
1415 static const uint16_t float_ops
[5] = {
1416 0, X_GLrop_TexCoord1fv
, X_GLrop_TexCoord2fv
, X_GLrop_TexCoord3fv
,
1419 static const uint16_t double_ops
[5] = {
1420 0, X_GLrop_TexCoord1dv
, X_GLrop_TexCoord2dv
, X_GLrop_TexCoord3dv
,
1424 static const uint16_t mshort_ops
[5] = {
1425 0, X_GLrop_MultiTexCoord1svARB
, X_GLrop_MultiTexCoord2svARB
,
1426 X_GLrop_MultiTexCoord3svARB
, X_GLrop_MultiTexCoord4svARB
1428 static const uint16_t mint_ops
[5] = {
1429 0, X_GLrop_MultiTexCoord1ivARB
, X_GLrop_MultiTexCoord2ivARB
,
1430 X_GLrop_MultiTexCoord3ivARB
, X_GLrop_MultiTexCoord4ivARB
1432 static const uint16_t mfloat_ops
[5] = {
1433 0, X_GLrop_MultiTexCoord1fvARB
, X_GLrop_MultiTexCoord2fvARB
,
1434 X_GLrop_MultiTexCoord3fvARB
, X_GLrop_MultiTexCoord4fvARB
1436 static const uint16_t mdouble_ops
[5] = {
1437 0, X_GLrop_MultiTexCoord1dvARB
, X_GLrop_MultiTexCoord2dvARB
,
1438 X_GLrop_MultiTexCoord3dvARB
, X_GLrop_MultiTexCoord4dvARB
1442 struct glx_context
*gc
= __glXGetCurrentContext();
1443 __GLXattribute
*state
= (__GLXattribute
*) (gc
->client_state_private
);
1444 struct array_state_vector
*arrays
= state
->array_state
;
1445 struct array_state
*a
;
1446 unsigned header_size
;
1450 if (size
< 1 || size
> 4 || stride
< 0) {
1451 __glXSetError(gc
, GL_INVALID_VALUE
);
1455 index
= arrays
->active_texture_unit
;
1459 opcode
= short_ops
[size
];
1462 opcode
= int_ops
[size
];
1465 opcode
= float_ops
[size
];
1468 opcode
= double_ops
[size
];
1471 __glXSetError(gc
, GL_INVALID_ENUM
);
1480 opcode
= mshort_ops
[size
];
1483 opcode
= mint_ops
[size
];
1486 opcode
= mfloat_ops
[size
];
1489 opcode
= mdouble_ops
[size
];
1492 __glXSetError(gc
, GL_INVALID_ENUM
);
1499 a
= get_array_entry(arrays
, GL_TEXTURE_COORD_ARRAY
, index
);
1501 COMMON_ARRAY_DATA_INIT(a
, pointer
, type
, stride
, size
, GL_FALSE
,
1502 header_size
, opcode
);
1505 arrays
->array_info_cache_valid
= GL_FALSE
;
1511 __indirect_glSecondaryColorPointer(GLint size
, GLenum type
, GLsizei stride
,
1512 const GLvoid
* pointer
)
1515 struct glx_context
*gc
= __glXGetCurrentContext();
1516 __GLXattribute
*state
= (__GLXattribute
*) (gc
->client_state_private
);
1517 struct array_state_vector
*arrays
= state
->array_state
;
1518 struct array_state
*a
;
1521 if (size
!= 3 || stride
< 0) {
1522 __glXSetError(gc
, GL_INVALID_VALUE
);
1530 case GL_UNSIGNED_BYTE
:
1536 case GL_UNSIGNED_SHORT
:
1542 case GL_UNSIGNED_INT
:
1552 __glXSetError(gc
, GL_INVALID_ENUM
);
1556 a
= get_array_entry(arrays
, GL_SECONDARY_COLOR_ARRAY
, 0);
1558 __glXSetError(gc
, GL_INVALID_OPERATION
);
1562 COMMON_ARRAY_DATA_INIT(a
, pointer
, type
, stride
, size
, GL_TRUE
, 4, opcode
);
1565 arrays
->array_info_cache_valid
= GL_FALSE
;
1571 __indirect_glFogCoordPointer(GLenum type
, GLsizei stride
,
1572 const GLvoid
* pointer
)
1575 struct glx_context
*gc
= __glXGetCurrentContext();
1576 __GLXattribute
*state
= (__GLXattribute
*) (gc
->client_state_private
);
1577 struct array_state_vector
*arrays
= state
->array_state
;
1578 struct array_state
*a
;
1582 __glXSetError(gc
, GL_INVALID_VALUE
);
1594 __glXSetError(gc
, GL_INVALID_ENUM
);
1598 a
= get_array_entry(arrays
, GL_FOG_COORD_ARRAY
, 0);
1600 __glXSetError(gc
, GL_INVALID_OPERATION
);
1604 COMMON_ARRAY_DATA_INIT(a
, pointer
, type
, stride
, 1, GL_FALSE
, 4, opcode
);
1607 arrays
->array_info_cache_valid
= GL_FALSE
;
1613 __indirect_glVertexAttribPointer(GLuint index
, GLint size
,
1614 GLenum type
, GLboolean normalized
,
1615 GLsizei stride
, const GLvoid
* pointer
)
1617 static const uint16_t short_ops
[5] = {
1618 0, X_GLrop_VertexAttrib1svARB
, X_GLrop_VertexAttrib2svARB
,
1619 X_GLrop_VertexAttrib3svARB
, X_GLrop_VertexAttrib4svARB
1621 static const uint16_t float_ops
[5] = {
1622 0, X_GLrop_VertexAttrib1fvARB
, X_GLrop_VertexAttrib2fvARB
,
1623 X_GLrop_VertexAttrib3fvARB
, X_GLrop_VertexAttrib4fvARB
1625 static const uint16_t double_ops
[5] = {
1626 0, X_GLrop_VertexAttrib1dvARB
, X_GLrop_VertexAttrib2dvARB
,
1627 X_GLrop_VertexAttrib3dvARB
, X_GLrop_VertexAttrib4dvARB
1631 struct glx_context
*gc
= __glXGetCurrentContext();
1632 __GLXattribute
*state
= (__GLXattribute
*) (gc
->client_state_private
);
1633 struct array_state_vector
*arrays
= state
->array_state
;
1634 struct array_state
*a
;
1635 unsigned true_immediate_count
;
1636 unsigned true_immediate_size
;
1639 if ((size
< 1) || (size
> 4) || (stride
< 0)
1640 || (index
> arrays
->num_vertex_program_attribs
)) {
1641 __glXSetError(gc
, GL_INVALID_VALUE
);
1645 if (normalized
&& (type
!= GL_FLOAT
) && (type
!= GL_DOUBLE
)) {
1648 opcode
= X_GLrop_VertexAttrib4NbvARB
;
1650 case GL_UNSIGNED_BYTE
:
1651 opcode
= X_GLrop_VertexAttrib4NubvARB
;
1654 opcode
= X_GLrop_VertexAttrib4NsvARB
;
1656 case GL_UNSIGNED_SHORT
:
1657 opcode
= X_GLrop_VertexAttrib4NusvARB
;
1660 opcode
= X_GLrop_VertexAttrib4NivARB
;
1662 case GL_UNSIGNED_INT
:
1663 opcode
= X_GLrop_VertexAttrib4NuivARB
;
1666 __glXSetError(gc
, GL_INVALID_ENUM
);
1670 true_immediate_count
= 4;
1673 true_immediate_count
= size
;
1677 opcode
= X_GLrop_VertexAttrib4bvARB
;
1678 true_immediate_count
= 4;
1680 case GL_UNSIGNED_BYTE
:
1681 opcode
= X_GLrop_VertexAttrib4ubvARB
;
1682 true_immediate_count
= 4;
1685 opcode
= short_ops
[size
];
1687 case GL_UNSIGNED_SHORT
:
1688 opcode
= X_GLrop_VertexAttrib4usvARB
;
1689 true_immediate_count
= 4;
1692 opcode
= X_GLrop_VertexAttrib4ivARB
;
1693 true_immediate_count
= 4;
1695 case GL_UNSIGNED_INT
:
1696 opcode
= X_GLrop_VertexAttrib4uivARB
;
1697 true_immediate_count
= 4;
1700 opcode
= float_ops
[size
];
1703 opcode
= double_ops
[size
];
1706 __glXSetError(gc
, GL_INVALID_ENUM
);
1711 a
= get_array_entry(arrays
, GL_VERTEX_ATTRIB_ARRAY_POINTER
, index
);
1713 __glXSetError(gc
, GL_INVALID_OPERATION
);
1717 COMMON_ARRAY_DATA_INIT(a
, pointer
, type
, stride
, size
, normalized
, 8,
1720 true_immediate_size
= __glXTypeSize(type
) * true_immediate_count
;
1721 a
->header
[0] = __GLX_PAD(8 + true_immediate_size
);
1724 arrays
->array_info_cache_valid
= GL_FALSE
;
1730 * I don't have 100% confidence that this is correct. The different rules
1731 * about whether or not generic vertex attributes alias "classic" vertex
1732 * attributes (i.e., attrib1 ?= primary color) between ARB_vertex_program,
1733 * ARB_vertex_shader, and NV_vertex_program are a bit confusing. My
1734 * feeling is that the client-side doesn't have to worry about it. The
1735 * client just sends all the data to the server and lets the server deal
1739 __indirect_glVertexAttribPointerNV(GLuint index
, GLint size
,
1740 GLenum type
, GLsizei stride
,
1741 const GLvoid
* pointer
)
1743 struct glx_context
*gc
= __glXGetCurrentContext();
1744 GLboolean normalized
= GL_FALSE
;
1748 case GL_UNSIGNED_BYTE
:
1750 __glXSetError(gc
, GL_INVALID_VALUE
);
1753 normalized
= GL_TRUE
;
1758 __indirect_glVertexAttribPointer(index
, size
, type
,
1759 normalized
, stride
, pointer
);
1762 __glXSetError(gc
, GL_INVALID_ENUM
);
1769 __indirect_glClientActiveTexture(GLenum texture
)
1771 struct glx_context
*const gc
= __glXGetCurrentContext();
1772 __GLXattribute
*const state
=
1773 (__GLXattribute
*) (gc
->client_state_private
);
1774 struct array_state_vector
*const arrays
= state
->array_state
;
1775 const GLint unit
= (GLint
) texture
- GL_TEXTURE0
;
1778 if ((unit
< 0) || (unit
>= arrays
->num_texture_units
)) {
1779 __glXSetError(gc
, GL_INVALID_ENUM
);
1783 arrays
->active_texture_unit
= unit
;
1788 * Modify the enable state for the selected array
1791 __glXSetArrayEnable(__GLXattribute
* state
, GLenum key
, unsigned index
,
1794 struct array_state_vector
*arrays
= state
->array_state
;
1795 struct array_state
*a
;
1798 /* Texture coordinate arrays have an implict index set when the
1799 * application calls glClientActiveTexture.
1801 if (key
== GL_TEXTURE_COORD_ARRAY
) {
1802 index
= arrays
->active_texture_unit
;
1805 a
= get_array_entry(arrays
, key
, index
);
1807 if ((a
!= NULL
) && (a
->enabled
!= enable
)) {
1808 a
->enabled
= enable
;
1809 arrays
->array_info_cache_valid
= GL_FALSE
;
1817 __glXArrayDisableAll(__GLXattribute
* state
)
1819 struct array_state_vector
*arrays
= state
->array_state
;
1823 for (i
= 0; i
< arrays
->num_arrays
; i
++) {
1824 arrays
->arrays
[i
].enabled
= GL_FALSE
;
1827 arrays
->array_info_cache_valid
= GL_FALSE
;
1834 __glXGetArrayEnable(const __GLXattribute
* const state
,
1835 GLenum key
, unsigned index
, GLintptr
* dest
)
1837 const struct array_state_vector
*arrays
= state
->array_state
;
1838 const struct array_state
*a
=
1839 get_array_entry((struct array_state_vector
*) arrays
,
1843 *dest
= (GLintptr
) a
->enabled
;
1853 __glXGetArrayType(const __GLXattribute
* const state
,
1854 GLenum key
, unsigned index
, GLintptr
* dest
)
1856 const struct array_state_vector
*arrays
= state
->array_state
;
1857 const struct array_state
*a
=
1858 get_array_entry((struct array_state_vector
*) arrays
,
1862 *dest
= (GLintptr
) a
->data_type
;
1872 __glXGetArraySize(const __GLXattribute
* const state
,
1873 GLenum key
, unsigned index
, GLintptr
* dest
)
1875 const struct array_state_vector
*arrays
= state
->array_state
;
1876 const struct array_state
*a
=
1877 get_array_entry((struct array_state_vector
*) arrays
,
1881 *dest
= (GLintptr
) a
->count
;
1891 __glXGetArrayStride(const __GLXattribute
* const state
,
1892 GLenum key
, unsigned index
, GLintptr
* dest
)
1894 const struct array_state_vector
*arrays
= state
->array_state
;
1895 const struct array_state
*a
=
1896 get_array_entry((struct array_state_vector
*) arrays
,
1900 *dest
= (GLintptr
) a
->user_stride
;
1910 __glXGetArrayPointer(const __GLXattribute
* const state
,
1911 GLenum key
, unsigned index
, void **dest
)
1913 const struct array_state_vector
*arrays
= state
->array_state
;
1914 const struct array_state
*a
=
1915 get_array_entry((struct array_state_vector
*) arrays
,
1920 *dest
= (void *) (a
->data
);
1930 __glXGetArrayNormalized(const __GLXattribute
* const state
,
1931 GLenum key
, unsigned index
, GLintptr
* dest
)
1933 const struct array_state_vector
*arrays
= state
->array_state
;
1934 const struct array_state
*a
=
1935 get_array_entry((struct array_state_vector
*) arrays
,
1940 *dest
= (GLintptr
) a
->normalized
;
1950 __glXGetActiveTextureUnit(const __GLXattribute
* const state
)
1952 return state
->array_state
->active_texture_unit
;
1957 __glXPushArrayState(__GLXattribute
* state
)
1959 struct array_state_vector
*arrays
= state
->array_state
;
1960 struct array_stack_state
*stack
=
1961 &arrays
->stack
[(arrays
->stack_index
* arrays
->num_arrays
)];
1964 /* XXX are we pushing _all_ the necessary fields? */
1965 for (i
= 0; i
< arrays
->num_arrays
; i
++) {
1966 stack
[i
].data
= arrays
->arrays
[i
].data
;
1967 stack
[i
].data_type
= arrays
->arrays
[i
].data_type
;
1968 stack
[i
].user_stride
= arrays
->arrays
[i
].user_stride
;
1969 stack
[i
].count
= arrays
->arrays
[i
].count
;
1970 stack
[i
].key
= arrays
->arrays
[i
].key
;
1971 stack
[i
].index
= arrays
->arrays
[i
].index
;
1972 stack
[i
].enabled
= arrays
->arrays
[i
].enabled
;
1975 arrays
->active_texture_unit_stack
[arrays
->stack_index
] =
1976 arrays
->active_texture_unit
;
1978 arrays
->stack_index
++;
1983 __glXPopArrayState(__GLXattribute
* state
)
1985 struct array_state_vector
*arrays
= state
->array_state
;
1986 struct array_stack_state
*stack
;
1990 arrays
->stack_index
--;
1991 stack
= &arrays
->stack
[(arrays
->stack_index
* arrays
->num_arrays
)];
1993 for (i
= 0; i
< arrays
->num_arrays
; i
++) {
1994 switch (stack
[i
].key
) {
1995 case GL_NORMAL_ARRAY
:
1996 __indirect_glNormalPointer(stack
[i
].data_type
,
1997 stack
[i
].user_stride
, stack
[i
].data
);
1999 case GL_COLOR_ARRAY
:
2000 __indirect_glColorPointer(stack
[i
].count
,
2002 stack
[i
].user_stride
, stack
[i
].data
);
2004 case GL_INDEX_ARRAY
:
2005 __indirect_glIndexPointer(stack
[i
].data_type
,
2006 stack
[i
].user_stride
, stack
[i
].data
);
2008 case GL_EDGE_FLAG_ARRAY
:
2009 __indirect_glEdgeFlagPointer(stack
[i
].user_stride
, stack
[i
].data
);
2011 case GL_TEXTURE_COORD_ARRAY
:
2012 arrays
->active_texture_unit
= stack
[i
].index
;
2013 __indirect_glTexCoordPointer(stack
[i
].count
,
2015 stack
[i
].user_stride
, stack
[i
].data
);
2017 case GL_SECONDARY_COLOR_ARRAY
:
2018 __indirect_glSecondaryColorPointer(stack
[i
].count
,
2020 stack
[i
].user_stride
,
2023 case GL_FOG_COORDINATE_ARRAY
:
2024 __indirect_glFogCoordPointer(stack
[i
].data_type
,
2025 stack
[i
].user_stride
, stack
[i
].data
);
2030 __glXSetArrayEnable(state
, stack
[i
].key
, stack
[i
].index
,
2034 arrays
->active_texture_unit
=
2035 arrays
->active_texture_unit_stack
[arrays
->stack_index
];