1 /*
2 * (C) Copyright IBM Corporation 2004, 2005
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * IBM,
20 * AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
22 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 */
25
26 #include <inttypes.h>
27 #include <assert.h>
28 #include <string.h>
29
30 #include "glxclient.h"
31 #include "indirect.h"
32 #include <GL/glxproto.h>
33 #include "glxextensions.h"
34 #include "indirect_vertex_array.h"
35 #include "indirect_vertex_array_priv.h"
36
37 #define __GLX_PAD(n) (((n)+3) & ~3)
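/* Illustrative only: __GLX_PAD rounds a byte count up to the next multiple
 * of four, the alignment required for GLX render command data.  A few sample
 * values, following the definition above:
 *
 *    __GLX_PAD(1)  == 4      ((1 + 3) & ~3)
 *    __GLX_PAD(4)  == 4
 *    __GLX_PAD(6)  == 8      e.g. a 3-component GLshort element
 *    __GLX_PAD(12) == 12     e.g. a 3-component GLfloat element
 */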
38
39 /**
40 * \file indirect_vertex_array.c
41 * Implement GLX protocol for vertex arrays and vertex buffer objects.
42 *
43 * The most important function in this file is \c fill_array_info_cache.
44 * The \c array_state_vector contains a cache of the ARRAY_INFO data sent
45 * in the DrawArrays protocol. Certain operations, such as enabling or
46 * disabling an array, can invalidate this cache. \c fill_array_info_cache
47 * fills in this data. Additionally, it examines the enabled state and
48 * other factors to determine which "version" of the DrawArrays protocol
49 * can be used.
50 *
51 * Currently, only two versions of the DrawArrays protocol are implemented. The
52 * first version is the "none" protocol. This is the fallback when the
53 * server does not support GL 1.1 / EXT_vertex_arrays. It is implemented
54 * by sending batches of immediate mode commands that are equivalent to the
55 * DrawArrays protocol.
56 *
57 * The other protocol that is currently implemented is the "old" protocol.
58 * This is the GL 1.1 DrawArrays protocol. The only difference between GL
59 * 1.1 and EXT_vertex_arrays is the opcode used for the DrawArrays command.
60 * This protocol is called "old" because the ARB is in the process of
61 * defining a new protocol, which will probably be called either "new" or
62 * "vbo", to support multiple texture coordinate arrays, generic attributes,
63 * and vertex buffer objects.
64 *
65 * \author Ian Romanick <ian.d.romanick@intel.com>
66 */
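/* Rough sketch (not a literal protocol trace) of the difference between the
 * two implemented paths for a glDrawArrays(GL_TRIANGLES, 0, 3) call with
 * only a 3-float vertex array enabled:
 *
 *   "none" protocol:  Begin(GL_TRIANGLES), Vertex3fv(v0), Vertex3fv(v1),
 *                     Vertex3fv(v2), End -- a batch of immediate-mode
 *                     render commands built by emit_DrawArrays_none().
 *
 *   "old" protocol:   a single DrawArrays render command containing the
 *                     cached ARRAY_INFO description followed by the
 *                     interleaved array data, built by emit_DrawArrays_old().
 */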
67
68 static void emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count);
69 static void emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count);
70
71 static void emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
72 const GLvoid * indices);
73 static void emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
74 const GLvoid * indices);
75
76
77 static GLubyte *emit_element_none(GLubyte * dst,
78 const struct array_state_vector *arrays,
79 unsigned index);
80 static GLubyte *emit_element_old(GLubyte * dst,
81 const struct array_state_vector *arrays,
82 unsigned index);
83 static struct array_state *get_array_entry(const struct array_state_vector
84 *arrays, GLenum key,
85 unsigned index);
86 static void fill_array_info_cache(struct array_state_vector *arrays);
87 static GLboolean validate_mode(__GLXcontext * gc, GLenum mode);
88 static GLboolean validate_count(__GLXcontext * gc, GLsizei count);
89 static GLboolean validate_type(__GLXcontext * gc, GLenum type);
90
91
92 /**
93 * Table of sizes, in bytes, of the GL types. All of the type enums are in
94 * the range 0x1400 - 0x140F, which also covers types added by extensions
95 * (e.g., \c GL_HALF_FLOAT_NV). The elements of this table correspond to
96 * the type enums masked with 0x0f.
97 *
98 * \note
99 * The size of \c GL_HALF_FLOAT_NV is not included. Neither are the sizes
100 * of \c GL_2_BYTES, \c GL_3_BYTES, or \c GL_4_BYTES.
101 */
102 const GLuint __glXTypeSize_table[16] = {
103 1, 1, 2, 2, 4, 4, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0
104 };
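/* Example lookups, for illustration: the table is indexed by the low four
 * bits of the type enum, so
 *
 *    GL_UNSIGNED_BYTE (0x1401) -> __glXTypeSize_table[1]  = 1
 *    GL_FLOAT         (0x1406) -> __glXTypeSize_table[6]  = 4
 *    GL_DOUBLE        (0x140A) -> __glXTypeSize_table[10] = 8
 *
 * assuming __glXTypeSize(), used below, is the accessor (declared elsewhere)
 * that performs this masked lookup.
 */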
105
106
107 /**
108 * Free the per-context array state that was allocated with
109 * __glXInitVertexArrayState().
110 */
111 void
112 __glXFreeVertexArrayState(__GLXcontext * gc)
113 {
114 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
115 struct array_state_vector *arrays = state->array_state;
116
117 if (arrays) {
118 if (arrays->stack) {
119 free(arrays->stack);
120 arrays->stack = NULL;
121 }
122 if (arrays->arrays) {
123 free(arrays->arrays);
124 arrays->arrays = NULL;
125 }
126 free(arrays);
127 state->array_state = NULL;
128 }
129 }
130
131
132 /**
133 * Initialize vertex array state of a GLX context.
134 *
135 * \param gc GLX context whose vertex array state is to be initialized.
136 *
137 * \warning
138 * This function may only be called after __GLXcontext::gl_extension_bits,
139 * __GLXcontext::server_minor, and __GLXcontext::server_major have been
140 * initialized. These values are used to determine what vertex arrays are
141 * supported.
142 *
143 * \bug
144 * Return values from malloc are not properly tested.
145 */
146 void
147 __glXInitVertexArrayState(__GLXcontext * gc)
148 {
149 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
150 struct array_state_vector *arrays;
151
152 unsigned array_count;
153 int texture_units = 1, vertex_program_attribs = 0;
154 unsigned i, j;
155
156 GLboolean got_fog = GL_FALSE;
157 GLboolean got_secondary_color = GL_FALSE;
158
159
160 arrays = calloc(1, sizeof(struct array_state_vector));
161 state->array_state = arrays;
162
163 arrays->old_DrawArrays_possible = !state->NoDrawArraysProtocol;
164 arrays->new_DrawArrays_possible = GL_FALSE;
165 arrays->DrawArrays = NULL;
166
167 arrays->active_texture_unit = 0;
168
169
170 /* Determine how many arrays are actually needed. Only arrays that
171 * are supported by the server are created. For example, if the server
172 * supports only 2 texture units, then only 2 texture coordinate arrays
173 * are created.
174 *
175 * At the very least, GL_VERTEX_ARRAY, GL_NORMAL_ARRAY,
176 * GL_COLOR_ARRAY, GL_INDEX_ARRAY, GL_TEXTURE_COORD_ARRAY, and
177 * GL_EDGE_FLAG_ARRAY are supported.
178 */
179
180 array_count = 5;
181
182 if (__glExtensionBitIsEnabled(gc, GL_EXT_fog_coord_bit)
183 || (gc->server_major > 1) || (gc->server_minor >= 4)) {
184 got_fog = GL_TRUE;
185 array_count++;
186 }
187
188 if (__glExtensionBitIsEnabled(gc, GL_EXT_secondary_color_bit)
189 || (gc->server_major > 1) || (gc->server_minor >= 4)) {
190 got_secondary_color = GL_TRUE;
191 array_count++;
192 }
193
194 if (__glExtensionBitIsEnabled(gc, GL_ARB_multitexture_bit)
195 || (gc->server_major > 1) || (gc->server_minor >= 3)) {
196 __indirect_glGetIntegerv(GL_MAX_TEXTURE_UNITS, &texture_units);
197 }
198
199 if (__glExtensionBitIsEnabled(gc, GL_ARB_vertex_program_bit)) {
200 __indirect_glGetProgramivARB(GL_VERTEX_PROGRAM_ARB,
201 GL_MAX_PROGRAM_ATTRIBS_ARB,
202 &vertex_program_attribs);
203 }
204
205 arrays->num_texture_units = texture_units;
206 arrays->num_vertex_program_attribs = vertex_program_attribs;
207 array_count += texture_units + vertex_program_attribs;
208 arrays->num_arrays = array_count;
209 arrays->arrays = calloc(array_count, sizeof(struct array_state));
210
211 arrays->arrays[0].data_type = GL_FLOAT;
212 arrays->arrays[0].count = 3;
213 arrays->arrays[0].key = GL_NORMAL_ARRAY;
214 arrays->arrays[0].normalized = GL_TRUE;
215 arrays->arrays[0].old_DrawArrays_possible = GL_TRUE;
216
217 arrays->arrays[1].data_type = GL_FLOAT;
218 arrays->arrays[1].count = 4;
219 arrays->arrays[1].key = GL_COLOR_ARRAY;
220 arrays->arrays[1].normalized = GL_TRUE;
221 arrays->arrays[1].old_DrawArrays_possible = GL_TRUE;
222
223 arrays->arrays[2].data_type = GL_FLOAT;
224 arrays->arrays[2].count = 1;
225 arrays->arrays[2].key = GL_INDEX_ARRAY;
226 arrays->arrays[2].old_DrawArrays_possible = GL_TRUE;
227
228 arrays->arrays[3].data_type = GL_UNSIGNED_BYTE;
229 arrays->arrays[3].count = 1;
230 arrays->arrays[3].key = GL_EDGE_FLAG_ARRAY;
231 arrays->arrays[3].old_DrawArrays_possible = GL_TRUE;
232
233 for (i = 0; i < texture_units; i++) {
234 arrays->arrays[4 + i].data_type = GL_FLOAT;
235 arrays->arrays[4 + i].count = 4;
236 arrays->arrays[4 + i].key = GL_TEXTURE_COORD_ARRAY;
237
238 arrays->arrays[4 + i].old_DrawArrays_possible = (i == 0);
239 arrays->arrays[4 + i].index = i;
240
241 arrays->arrays[4 + i].header[1] = i + GL_TEXTURE0;
242 }
243
244 i = 4 + texture_units;
245
246 if (got_fog) {
247 arrays->arrays[i].data_type = GL_FLOAT;
248 arrays->arrays[i].count = 1;
249 arrays->arrays[i].key = GL_FOG_COORDINATE_ARRAY;
250 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
251 i++;
252 }
253
254 if (got_secondary_color) {
255 arrays->arrays[i].data_type = GL_FLOAT;
256 arrays->arrays[i].count = 3;
257 arrays->arrays[i].key = GL_SECONDARY_COLOR_ARRAY;
258 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
259 arrays->arrays[i].normalized = GL_TRUE;
260 i++;
261 }
262
263
264 for (j = 0; j < vertex_program_attribs; j++) {
265 const unsigned idx = (vertex_program_attribs - (j + 1));
266
267
268 arrays->arrays[idx + i].data_type = GL_FLOAT;
269 arrays->arrays[idx + i].count = 4;
270 arrays->arrays[idx + i].key = GL_VERTEX_ATTRIB_ARRAY_POINTER;
271
272 arrays->arrays[idx + i].old_DrawArrays_possible = 0;
273 arrays->arrays[idx + i].index = idx;
274
275 arrays->arrays[idx + i].header[1] = idx;
276 }
277
278 i += vertex_program_attribs;
279
280
281 /* Vertex array *must* be last because of the way that
282 * emit_DrawArrays_none works.
283 */
284
285 arrays->arrays[i].data_type = GL_FLOAT;
286 arrays->arrays[i].count = 4;
287 arrays->arrays[i].key = GL_VERTEX_ARRAY;
288 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
289
290 assert((i + 1) == arrays->num_arrays);
291
292 arrays->stack_index = 0;
293 arrays->stack = malloc(sizeof(struct array_stack_state)
294 * arrays->num_arrays);
295 }
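/* Worked example of the layout built above, assuming a server with two
 * texture units, fog-coordinate and secondary-color support, and no vertex
 * program attributes (array_count = 5 + 1 + 1 + 2 = 9):
 *
 *    arrays[0]  GL_NORMAL_ARRAY
 *    arrays[1]  GL_COLOR_ARRAY
 *    arrays[2]  GL_INDEX_ARRAY
 *    arrays[3]  GL_EDGE_FLAG_ARRAY
 *    arrays[4]  GL_TEXTURE_COORD_ARRAY (unit 0)
 *    arrays[5]  GL_TEXTURE_COORD_ARRAY (unit 1)
 *    arrays[6]  GL_FOG_COORDINATE_ARRAY
 *    arrays[7]  GL_SECONDARY_COLOR_ARRAY
 *    arrays[8]  GL_VERTEX_ARRAY          (always last, see the note above)
 */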
296
297
298 /**
299 * Calculate the size of a single vertex for the "none" protocol. This is
300 * essentially the size of all the immediate-mode commands required to
301 * implement the enabled vertex arrays.
302 */
303 static size_t
304 calculate_single_vertex_size_none(const struct array_state_vector *arrays)
305 {
306 size_t single_vertex_size = 0;
307 unsigned i;
308
309
310 for (i = 0; i < arrays->num_arrays; i++) {
311 if (arrays->arrays[i].enabled) {
312 single_vertex_size += ((uint16_t *) arrays->arrays[i].header)[0];
313 }
314 }
315
316 return single_vertex_size;
317 }
318
319
320 /**
321 * Emit a single element using non-DrawArrays protocol.
322 */
323 GLubyte *
324 emit_element_none(GLubyte * dst,
325 const struct array_state_vector * arrays, unsigned index)
326 {
327 unsigned i;
328
329
330 for (i = 0; i < arrays->num_arrays; i++) {
331 if (arrays->arrays[i].enabled) {
332 const size_t offset = index * arrays->arrays[i].true_stride;
333
334 /* The generic attributes can have more data than is in the
335 * elements. This is because a vertex array can be a 2-element,
336 * normalized, unsigned short array, but the "closest" immediate-mode
337 * protocol is for a 4Nus. Since the sizes are small, the
338 * performance impact on modern processors should be negligible.
339 */
340 (void) memset(dst, 0, ((uint16_t *) arrays->arrays[i].header)[0]);
341
342 (void) memcpy(dst, arrays->arrays[i].header,
343 arrays->arrays[i].header_size);
344
345 dst += arrays->arrays[i].header_size;
346
347 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
348 arrays->arrays[i].element_size);
349
350 dst += __GLX_PAD(arrays->arrays[i].element_size);
351 }
352 }
353
354 return dst;
355 }
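/* Illustrative example, assuming only GL_NORMAL_ARRAY and GL_VERTEX_ARRAY
 * are enabled, both as 3-component GL_FLOAT arrays.  For each element,
 * emit_element_none() writes one immediate-mode render command per enabled
 * array (4-byte header, then padded data), with the vertex array last so
 * that the Vertex command completes the vertex:
 *
 *    [ length=16 | X_GLrop_Normal3fv ][ nx ny nz ]   16 bytes
 *    [ length=16 | X_GLrop_Vertex3fv ][ vx vy vz ]   16 bytes
 *
 * For this case calculate_single_vertex_size_none() would report 32 bytes,
 * since it sums the command sizes cached in each array's header.
 */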
356
357
358 /**
359 * Emit a single element using "old" DrawArrays protocol from
360 * EXT_vertex_arrays / OpenGL 1.1.
361 */
362 GLubyte *
363 emit_element_old(GLubyte * dst,
364 const struct array_state_vector * arrays, unsigned index)
365 {
366 unsigned i;
367
368
369 for (i = 0; i < arrays->num_arrays; i++) {
370 if (arrays->arrays[i].enabled) {
371 const size_t offset = index * arrays->arrays[i].true_stride;
372
373 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
374 arrays->arrays[i].element_size);
375
376 dst += __GLX_PAD(arrays->arrays[i].element_size);
377 }
378 }
379
380 return dst;
381 }
382
383
384 struct array_state *
385 get_array_entry(const struct array_state_vector *arrays,
386 GLenum key, unsigned index)
387 {
388 unsigned i;
389
390 for (i = 0; i < arrays->num_arrays; i++) {
391 if ((arrays->arrays[i].key == key)
392 && (arrays->arrays[i].index == index)) {
393 return &arrays->arrays[i];
394 }
395 }
396
397 return NULL;
398 }
399
400
401 static GLboolean
402 allocate_array_info_cache(struct array_state_vector *arrays,
403 size_t required_size)
404 {
405 #define MAX_HEADER_SIZE 20
406 if (arrays->array_info_cache_buffer_size < required_size) {
407 GLubyte *temp = realloc(arrays->array_info_cache_base,
408 required_size + MAX_HEADER_SIZE);
409
410 if (temp == NULL) {
411 return GL_FALSE;
412 }
413
414 arrays->array_info_cache_base = temp;
415 arrays->array_info_cache = temp + MAX_HEADER_SIZE;
416 arrays->array_info_cache_buffer_size = required_size;
417 }
418
419 arrays->array_info_cache_size = required_size;
420 return GL_TRUE;
421 }
422
423
424 /** Rebuild the ARRAY_INFO cache for the currently enabled arrays and
425  * select the DrawArrays / DrawElements implementation to use. */
426 void
427 fill_array_info_cache(struct array_state_vector *arrays)
428 {
429 GLboolean old_DrawArrays_possible;
430 unsigned i;
431
432
433 /* Determine how many arrays are enabled.
434 */
435
436 arrays->enabled_client_array_count = 0;
437 old_DrawArrays_possible = arrays->old_DrawArrays_possible;
438 for (i = 0; i < arrays->num_arrays; i++) {
439 if (arrays->arrays[i].enabled) {
440 arrays->enabled_client_array_count++;
441 old_DrawArrays_possible &= arrays->arrays[i].old_DrawArrays_possible;
442 }
443 }
444
445 if (arrays->new_DrawArrays_possible) {
446 assert(!arrays->new_DrawArrays_possible);   /* "new" protocol is not implemented */
447 }
448 else if (old_DrawArrays_possible) {
449 const size_t required_size = arrays->enabled_client_array_count * 12;
450 uint32_t *info;
451
452
453 if (!allocate_array_info_cache(arrays, required_size)) {
454 return;
455 }
456
457
458 info = (uint32_t *) arrays->array_info_cache;
459 for (i = 0; i < arrays->num_arrays; i++) {
460 if (arrays->arrays[i].enabled) {
461 *(info++) = arrays->arrays[i].data_type;
462 *(info++) = arrays->arrays[i].count;
463 *(info++) = arrays->arrays[i].key;
464 }
465 }
466
467 arrays->DrawArrays = emit_DrawArrays_old;
468 arrays->DrawElements = emit_DrawElements_old;
469 }
470 else {
471 arrays->DrawArrays = emit_DrawArrays_none;
472 arrays->DrawElements = emit_DrawElements_none;
473 }
474
475 arrays->array_info_cache_valid = GL_TRUE;
476 }
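/* Sketch of the ARRAY_INFO cache layout built above: one 12-byte record
 * (three 32-bit words: data type, count, array key) per enabled array,
 * written in arrays[] order.  For example, with a 4-ubyte color array and a
 * 3-float vertex array enabled:
 *
 *    { GL_UNSIGNED_BYTE, 4, GL_COLOR_ARRAY  }
 *    { GL_FLOAT,         3, GL_VERTEX_ARRAY }   (vertex array is last)
 *
 * i.e. required_size = 2 * 12 = 24 bytes, matching the
 * enabled_client_array_count * 12 computation above.
 */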
477
478
479 /**
480 * Emit a \c glDrawArrays command using the "none" protocol. That is,
481 * emit immediate-mode commands that are equivalent to the requested
482 * \c glDrawArrays command. This is used with servers that don't support
483 * the OpenGL 1.1 / EXT_vertex_arrays DrawArrays protocol or in cases where
484 * vertex state is enabled that is not compatible with that protocol.
485 */
486 void
487 emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count)
488 {
489 __GLXcontext *gc = __glXGetCurrentContext();
490 const __GLXattribute *state =
491 (const __GLXattribute *) (gc->client_state_private);
492 struct array_state_vector *arrays = state->array_state;
493
494 size_t single_vertex_size;
495 GLubyte *pc;
496 unsigned i;
497 static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
498 static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
499
500
501 single_vertex_size = calculate_single_vertex_size_none(arrays);
502
503 pc = gc->pc;
504
505 (void) memcpy(pc, begin_cmd, 4);
506 *(int *) (pc + 4) = mode;
507
508 pc += 8;
509
510 for (i = 0; i < count; i++) {
511 if ((pc + single_vertex_size) >= gc->bufEnd) {
512 pc = __glXFlushRenderBuffer(gc, pc);
513 }
514
515 pc = emit_element_none(pc, arrays, first + i);
516 }
517
518 if ((pc + 4) >= gc->bufEnd) {
519 pc = __glXFlushRenderBuffer(gc, pc);
520 }
521
522 (void) memcpy(pc, end_cmd, 4);
523 pc += 4;
524
525 gc->pc = pc;
526 if (gc->pc > gc->limit) {
527 (void) __glXFlushRenderBuffer(gc, gc->pc);
528 }
529 }
530
531
532 /**
533 * Emit the header data for the GL 1.1 / EXT_vertex_arrays DrawArrays
534 * protocol.
535 *
536 * \param gc GLX context.
537 * \param arrays Array state.
538 * \param elements_per_request Location to store the number of elements that
539 * can fit in a single Render / RenderLarge
540 * command.
541 * \param total_requests Total number of requests for a RenderLarge
542 * command. If a Render command is used, this
543 * will be zero.
544 * \param mode Drawing mode.
545 * \param count Number of vertices.
546 *
547 * \returns
548 * A pointer to the buffer for array data.
549 */
550 static GLubyte *
551 emit_DrawArrays_header_old(__GLXcontext * gc,
552 struct array_state_vector *arrays,
553 size_t * elements_per_request,
554 unsigned int *total_requests,
555 GLenum mode, GLsizei count)
556 {
557 size_t command_size;
558 size_t single_vertex_size;
559 const unsigned header_size = 16;
560 unsigned i;
561 GLubyte *pc;
562
563
564 /* Determine the size of the whole command. This includes the header,
565 * the ARRAY_INFO data and the array data. Once this size is calculated,
566 * it will be known whether a Render or RenderLarge command is needed.
567 */
568
569 single_vertex_size = 0;
570 for (i = 0; i < arrays->num_arrays; i++) {
571 if (arrays->arrays[i].enabled) {
572 single_vertex_size += __GLX_PAD(arrays->arrays[i].element_size);
573 }
574 }
575
576 command_size = arrays->array_info_cache_size + header_size
577 + (single_vertex_size * count);
578
579
580 /* Write the header for either a Render command or a RenderLarge
581 * command. After the header is written, write the ARRAY_INFO data.
582 */
583
584 if (command_size > gc->maxSmallRenderCommandSize) {
585 /* maxSize is the maximum amount of data that can be stuffed into a
586 * single packet. sz_xGLXRenderReq is added because bufSize is the
587 * maximum packet size minus sz_xGLXRenderReq.
588 */
589 const size_t maxSize = (gc->bufSize + sz_xGLXRenderReq)
590 - sz_xGLXRenderLargeReq;
591 unsigned vertex_requests;
592
593
594 /* Calculate the number of data packets that will be required to send
595 * the whole command. To do this, the number of vertices that
596 * will fit in a single buffer must be calculated.
597 *
598 * The important value here is elements_per_request. This is the
599 * number of complete array elements that will fit in a single
600 * buffer. There may be some wasted space at the end of the buffer,
601 * but splitting elements across buffer boundaries would be painful.
602 */
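      /* Worked example with made-up numbers: if maxSize were 16360 bytes and
       * single_vertex_size were 28 bytes, then
       *
       *    elements_per_request = 16360 / 28 = 584        (integer division)
       *    vertex_requests      = (1000 + 584 - 1) / 584 = 2   for count = 1000
       *    total_requests       = 2 + 1 = 3
       *
       * where the extra request is the first chunk, which carries the
       * DrawArrays header and the ARRAY_INFO data.
       */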
603
604 elements_per_request[0] = maxSize / single_vertex_size;
605
606 vertex_requests = (count + elements_per_request[0] - 1)
607 / elements_per_request[0];
608
609 *total_requests = vertex_requests + 1;
610
611
612 __glXFlushRenderBuffer(gc, gc->pc);
613
614 command_size += 4;
615
616 pc = ((GLubyte *) arrays->array_info_cache) - (header_size + 4);
617 *(uint32_t *) (pc + 0) = command_size;
618 *(uint32_t *) (pc + 4) = X_GLrop_DrawArrays;
619 *(uint32_t *) (pc + 8) = count;
620 *(uint32_t *) (pc + 12) = arrays->enabled_client_array_count;
621 *(uint32_t *) (pc + 16) = mode;
622
623 __glXSendLargeChunk(gc, 1, *total_requests, pc,
624 header_size + 4 + arrays->array_info_cache_size);
625
626 pc = gc->pc;
627 }
628 else {
629 if ((gc->pc + command_size) >= gc->bufEnd) {
630 (void) __glXFlushRenderBuffer(gc, gc->pc);
631 }
632
633 pc = gc->pc;
634 *(uint16_t *) (pc + 0) = command_size;
635 *(uint16_t *) (pc + 2) = X_GLrop_DrawArrays;
636 *(uint32_t *) (pc + 4) = count;
637 *(uint32_t *) (pc + 8) = arrays->enabled_client_array_count;
638 *(uint32_t *) (pc + 12) = mode;
639
640 pc += header_size;
641
642 (void) memcpy(pc, arrays->array_info_cache,
643 arrays->array_info_cache_size);
644 pc += arrays->array_info_cache_size;
645
646 *elements_per_request = count;
647 *total_requests = 0;
648 }
649
650
651 return pc;
652 }
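/* For reference, the two header layouts written above, as far as can be read
 * from the code:
 *
 *   Render (small command) path, 16-byte header:
 *      uint16 command_size | uint16 X_GLrop_DrawArrays
 *      uint32 count
 *      uint32 enabled_client_array_count
 *      uint32 mode
 *
 *   RenderLarge path, 4 + 16 = 20-byte header:
 *      uint32 command_size
 *      uint32 X_GLrop_DrawArrays
 *      uint32 count
 *      uint32 enabled_client_array_count
 *      uint32 mode
 *
 * followed in both cases by the cached ARRAY_INFO records and then the
 * interleaved array data.
 */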
653
654
655 /** Emit a \c glDrawArrays command using the GL 1.1 / EXT_vertex_arrays
656  * ("old") DrawArrays protocol. */
657 void
658 emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count)
659 {
660 __GLXcontext *gc = __glXGetCurrentContext();
661 const __GLXattribute *state =
662 (const __GLXattribute *) (gc->client_state_private);
663 struct array_state_vector *arrays = state->array_state;
664
665 GLubyte *pc;
666 size_t elements_per_request;
667 unsigned total_requests = 0;
668 unsigned i;
669 size_t total_sent = 0;
670
671
672 pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
673 &total_requests, mode, count);
674
675
676 /* Write the arrays.
677 */
678
679 if (total_requests == 0) {
680 assert(elements_per_request >= count);
681
682 for (i = 0; i < count; i++) {
683 pc = emit_element_old(pc, arrays, i + first);
684 }
685
686 assert(pc <= gc->bufEnd);
687
688 gc->pc = pc;
689 if (gc->pc > gc->limit) {
690 (void) __glXFlushRenderBuffer(gc, gc->pc);
691 }
692 }
693 else {
694 unsigned req;
695
696
697 for (req = 2; req <= total_requests; req++) {
698 if (count < elements_per_request) {
699 elements_per_request = count;
700 }
701
702 pc = gc->pc;
703 for (i = 0; i < elements_per_request; i++) {
704 pc = emit_element_old(pc, arrays, i + first);
705 }
706
707 first += elements_per_request;
708
709 total_sent += (size_t) (pc - gc->pc);
710 __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
711
712 count -= elements_per_request;
713 }
714 }
715 }
716
717
718 void
719 emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
720 const GLvoid * indices)
721 {
722 __GLXcontext *gc = __glXGetCurrentContext();
723 const __GLXattribute *state =
724 (const __GLXattribute *) (gc->client_state_private);
725 struct array_state_vector *arrays = state->array_state;
726 static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
727 static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
728
729 GLubyte *pc;
730 size_t single_vertex_size;
731 unsigned i;
732
733
734 single_vertex_size = calculate_single_vertex_size_none(arrays);
735
736
737 if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
738 gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
739 }
740
741 pc = gc->pc;
742
743 (void) memcpy(pc, begin_cmd, 4);
744 *(int *) (pc + 4) = mode;
745
746 pc += 8;
747
748 for (i = 0; i < count; i++) {
749 unsigned index = 0;
750
751 if ((pc + single_vertex_size) >= gc->bufEnd) {
752 pc = __glXFlushRenderBuffer(gc, pc);
753 }
754
755 switch (type) {
756 case GL_UNSIGNED_INT:
757 index = (unsigned) (((GLuint *) indices)[i]);
758 break;
759 case GL_UNSIGNED_SHORT:
760 index = (unsigned) (((GLushort *) indices)[i]);
761 break;
762 case GL_UNSIGNED_BYTE:
763 index = (unsigned) (((GLubyte *) indices)[i]);
764 break;
765 }
766 pc = emit_element_none(pc, arrays, index);
767 }
768
769 if ((pc + 4) >= gc->bufEnd) {
770 pc = __glXFlushRenderBuffer(gc, pc);
771 }
772
773 (void) memcpy(pc, end_cmd, 4);
774 pc += 4;
775
776 gc->pc = pc;
777 if (gc->pc > gc->limit) {
778 (void) __glXFlushRenderBuffer(gc, gc->pc);
779 }
780 }
781
782
783 /** Emit a \c glDrawElements command using the GL 1.1 / EXT_vertex_arrays
784  * ("old") DrawArrays protocol. */
785 void
786 emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
787 const GLvoid * indices)
788 {
789 __GLXcontext *gc = __glXGetCurrentContext();
790 const __GLXattribute *state =
791 (const __GLXattribute *) (gc->client_state_private);
792 struct array_state_vector *arrays = state->array_state;
793
794 GLubyte *pc;
795 size_t elements_per_request;
796 unsigned total_requests = 0;
797 unsigned i;
798 unsigned req;
799 unsigned req_element = 0;
800
801
802 pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
803 &total_requests, mode, count);
804
805
806 /* Write the arrays.
807 */
808
809 req = 2;
810 while (count > 0) {
811 if (count < elements_per_request) {
812 elements_per_request = count;
813 }
814
815 switch (type) {
816 case GL_UNSIGNED_INT:{
817 const GLuint *ui_ptr = (const GLuint *) indices + req_element;
818
819 for (i = 0; i < elements_per_request; i++) {
820 const GLint index = (GLint) * (ui_ptr++);
821 pc = emit_element_old(pc, arrays, index);
822 }
823 break;
824 }
825 case GL_UNSIGNED_SHORT:{
826 const GLushort *us_ptr = (const GLushort *) indices + req_element;
827
828 for (i = 0; i < elements_per_request; i++) {
829 const GLint index = (GLint) * (us_ptr++);
830 pc = emit_element_old(pc, arrays, index);
831 }
832 break;
833 }
834 case GL_UNSIGNED_BYTE:{
835 const GLubyte *ub_ptr = (const GLubyte *) indices + req_element;
836
837 for (i = 0; i < elements_per_request; i++) {
838 const GLint index = (GLint) * (ub_ptr++);
839 pc = emit_element_old(pc, arrays, index);
840 }
841 break;
842 }
843 }
844
845 if (total_requests != 0) {
846 __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
847 pc = gc->pc;
848 req++;
849 }
850
851 count -= elements_per_request;
852 req_element += elements_per_request;
853 }
854
855
856 assert((total_requests == 0) || ((req - 1) == total_requests));
857
858 if (total_requests == 0) {
859 assert(pc <= gc->bufEnd);
860
861 gc->pc = pc;
862 if (gc->pc > gc->limit) {
863 (void) __glXFlushRenderBuffer(gc, gc->pc);
864 }
865 }
866 }
867
868
869 /**
870 * Validate that the \c mode parameter to \c glDrawArrays, et al. is valid.
871 * If it is not valid, then an error code is set in the GLX context.
872 *
873 * \returns
874 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
875 */
876 static GLboolean
877 validate_mode(__GLXcontext * gc, GLenum mode)
878 {
879 switch (mode) {
880 case GL_POINTS:
881 case GL_LINE_STRIP:
882 case GL_LINE_LOOP:
883 case GL_LINES:
884 case GL_TRIANGLE_STRIP:
885 case GL_TRIANGLE_FAN:
886 case GL_TRIANGLES:
887 case GL_QUAD_STRIP:
888 case GL_QUADS:
889 case GL_POLYGON:
890 break;
891 default:
892 __glXSetError(gc, GL_INVALID_ENUM);
893 return GL_FALSE;
894 }
895
896 return GL_TRUE;
897 }
898
899
900 /**
901 * Validate that the \c count parameter to \c glDrawArrays, et al. is valid.
902 * A value less than zero is invalid and will result in \c GL_INVALID_VALUE
903 * being set. A value of zero will not result in an error being set, but
904 * will result in \c GL_FALSE being returned.
905 *
906 * \returns
907 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
908 */
909 static GLboolean
910 validate_count(__GLXcontext * gc, GLsizei count)
911 {
912 if (count < 0) {
913 __glXSetError(gc, GL_INVALID_VALUE);
914 }
915
916 return (count > 0);
917 }
918
919
920 /**
921 * Validate that the \c type parameter to \c glDrawElements, et al. is
922 * valid. Only \c GL_UNSIGNED_BYTE, \c GL_UNSIGNED_SHORT, and
923 * \c GL_UNSIGNED_INT are valid.
924 *
925 * \returns
926 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
927 */
928 static GLboolean
929 validate_type(__GLXcontext * gc, GLenum type)
930 {
931 switch (type) {
932 case GL_UNSIGNED_INT:
933 case GL_UNSIGNED_SHORT:
934 case GL_UNSIGNED_BYTE:
935 return GL_TRUE;
936 default:
937 __glXSetError(gc, GL_INVALID_ENUM);
938 return GL_FALSE;
939 }
940 }
941
942
943 void
944 __indirect_glDrawArrays(GLenum mode, GLint first, GLsizei count)
945 {
946 __GLXcontext *gc = __glXGetCurrentContext();
947 const __GLXattribute *state =
948 (const __GLXattribute *) (gc->client_state_private);
949 struct array_state_vector *arrays = state->array_state;
950
951
952 if (validate_mode(gc, mode) && validate_count(gc, count)) {
953 if (!arrays->array_info_cache_valid) {
954 fill_array_info_cache(arrays);
955 }
956
957 arrays->DrawArrays(mode, first, count);
958 }
959 }
960
961
962 void
963 __indirect_glArrayElement(GLint index)
964 {
965 __GLXcontext *gc = __glXGetCurrentContext();
966 const __GLXattribute *state =
967 (const __GLXattribute *) (gc->client_state_private);
968 struct array_state_vector *arrays = state->array_state;
969
970 size_t single_vertex_size;
971
972
973 single_vertex_size = calculate_single_vertex_size_none(arrays);
974
975 if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
976 gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
977 }
978
979 gc->pc = emit_element_none(gc->pc, arrays, index);
980
981 if (gc->pc > gc->limit) {
982 (void) __glXFlushRenderBuffer(gc, gc->pc);
983 }
984 }
985
986
987 void
988 __indirect_glDrawElements(GLenum mode, GLsizei count, GLenum type,
989 const GLvoid * indices)
990 {
991 __GLXcontext *gc = __glXGetCurrentContext();
992 const __GLXattribute *state =
993 (const __GLXattribute *) (gc->client_state_private);
994 struct array_state_vector *arrays = state->array_state;
995
996
997 if (validate_mode(gc, mode) && validate_count(gc, count)
998 && validate_type(gc, type)) {
999 if (!arrays->array_info_cache_valid) {
1000 fill_array_info_cache(arrays);
1001 }
1002
1003 arrays->DrawElements(mode, count, type, indices);
1004 }
1005 }
1006
1007
1008 void
1009 __indirect_glDrawRangeElements(GLenum mode, GLuint start, GLuint end,
1010 GLsizei count, GLenum type,
1011 const GLvoid * indices)
1012 {
1013 __GLXcontext *gc = __glXGetCurrentContext();
1014 const __GLXattribute *state =
1015 (const __GLXattribute *) (gc->client_state_private);
1016 struct array_state_vector *arrays = state->array_state;
1017
1018
1019 if (validate_mode(gc, mode) && validate_count(gc, count)
1020 && validate_type(gc, type)) {
1021 if (end < start) {
1022 __glXSetError(gc, GL_INVALID_VALUE);
1023 return;
1024 }
1025
1026 if (!arrays->array_info_cache_valid) {
1027 fill_array_info_cache(arrays);
1028 }
1029
1030 arrays->DrawElements(mode, count, type, indices);
1031 }
1032 }
1033
1034
1035 void
1036 __indirect_glMultiDrawArraysEXT(GLenum mode, GLint * first, GLsizei * count,
1037 GLsizei primcount)
1038 {
1039 __GLXcontext *gc = __glXGetCurrentContext();
1040 const __GLXattribute *state =
1041 (const __GLXattribute *) (gc->client_state_private);
1042 struct array_state_vector *arrays = state->array_state;
1043 GLsizei i;
1044
1045
1046 if (validate_mode(gc, mode)) {
1047 if (!arrays->array_info_cache_valid) {
1048 fill_array_info_cache(arrays);
1049 }
1050
1051 for (i = 0; i < primcount; i++) {
1052 if (validate_count(gc, count[i])) {
1053 arrays->DrawArrays(mode, first[i], count[i]);
1054 }
1055 }
1056 }
1057 }
1058
1059
1060 void
1061 __indirect_glMultiDrawElementsEXT(GLenum mode, const GLsizei * count,
1062 GLenum type, const GLvoid ** indices,
1063 GLsizei primcount)
1064 {
1065 __GLXcontext *gc = __glXGetCurrentContext();
1066 const __GLXattribute *state =
1067 (const __GLXattribute *) (gc->client_state_private);
1068 struct array_state_vector *arrays = state->array_state;
1069 GLsizei i;
1070
1071
1072 if (validate_mode(gc, mode) && validate_type(gc, type)) {
1073 if (!arrays->array_info_cache_valid) {
1074 fill_array_info_cache(arrays);
1075 }
1076
1077 for (i = 0; i < primcount; i++) {
1078 if (validate_count(gc, count[i])) {
1079 arrays->DrawElements(mode, count[i], type, indices[i]);
1080 }
1081 }
1082 }
1083 }
1084
1085
1086 #define COMMON_ARRAY_DATA_INIT(a, PTR, TYPE, STRIDE, COUNT, NORMALIZED, HDR_SIZE, OPCODE) \
1087 do { \
1088 (a)->data = PTR; \
1089 (a)->data_type = TYPE; \
1090 (a)->user_stride = STRIDE; \
1091 (a)->count = COUNT; \
1092 (a)->normalized = NORMALIZED; \
1093 \
1094 (a)->element_size = __glXTypeSize( TYPE ) * COUNT; \
1095 (a)->true_stride = (STRIDE == 0) \
1096 ? (a)->element_size : STRIDE; \
1097 \
1098 (a)->header_size = HDR_SIZE; \
1099 ((uint16_t *) (a)->header)[0] = __GLX_PAD((a)->header_size + (a)->element_size); \
1100 ((uint16_t *) (a)->header)[1] = OPCODE; \
1101 } while(0)
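/* Example expansion, for illustration: a glNormalPointer(GL_FLOAT, 0, ptr)
 * call below ends up doing roughly
 *
 *    a->element_size = __glXTypeSize(GL_FLOAT) * 3;      4 * 3 = 12
 *    a->true_stride  = 12;                               stride 0 -> tightly packed
 *    a->header_size  = 4;
 *    ((uint16_t *) a->header)[0] = __GLX_PAD(4 + 12);    = 16
 *    ((uint16_t *) a->header)[1] = X_GLrop_Normal3fv;
 *
 * so the cached header is exactly the 4-byte render-command header that
 * emit_element_none() copies in front of each element's data.
 */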
1102
1103
1104 void
1105 __indirect_glVertexPointer(GLint size, GLenum type, GLsizei stride,
1106 const GLvoid * pointer)
1107 {
1108 static const uint16_t short_ops[5] = {
1109 0, 0, X_GLrop_Vertex2sv, X_GLrop_Vertex3sv, X_GLrop_Vertex4sv
1110 };
1111 static const uint16_t int_ops[5] = {
1112 0, 0, X_GLrop_Vertex2iv, X_GLrop_Vertex3iv, X_GLrop_Vertex4iv
1113 };
1114 static const uint16_t float_ops[5] = {
1115 0, 0, X_GLrop_Vertex2fv, X_GLrop_Vertex3fv, X_GLrop_Vertex4fv
1116 };
1117 static const uint16_t double_ops[5] = {
1118 0, 0, X_GLrop_Vertex2dv, X_GLrop_Vertex3dv, X_GLrop_Vertex4dv
1119 };
1120 uint16_t opcode;
1121 __GLXcontext *gc = __glXGetCurrentContext();
1122 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1123 struct array_state_vector *arrays = state->array_state;
1124 struct array_state *a;
1125
1126
1127 if (size < 2 || size > 4 || stride < 0) {
1128 __glXSetError(gc, GL_INVALID_VALUE);
1129 return;
1130 }
1131
1132 switch (type) {
1133 case GL_SHORT:
1134 opcode = short_ops[size];
1135 break;
1136 case GL_INT:
1137 opcode = int_ops[size];
1138 break;
1139 case GL_FLOAT:
1140 opcode = float_ops[size];
1141 break;
1142 case GL_DOUBLE:
1143 opcode = double_ops[size];
1144 break;
1145 default:
1146 __glXSetError(gc, GL_INVALID_ENUM);
1147 return;
1148 }
1149
1150 a = get_array_entry(arrays, GL_VERTEX_ARRAY, 0);
1151 assert(a != NULL);
1152 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE, 4,
1153 opcode);
1154
1155 if (a->enabled) {
1156 arrays->array_info_cache_valid = GL_FALSE;
1157 }
1158 }
1159
1160
1161 void
1162 __indirect_glNormalPointer(GLenum type, GLsizei stride,
1163 const GLvoid * pointer)
1164 {
1165 uint16_t opcode;
1166 __GLXcontext *gc = __glXGetCurrentContext();
1167 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1168 struct array_state_vector *arrays = state->array_state;
1169 struct array_state *a;
1170
1171
1172 if (stride < 0) {
1173 __glXSetError(gc, GL_INVALID_VALUE);
1174 return;
1175 }
1176
1177 switch (type) {
1178 case GL_BYTE:
1179 opcode = X_GLrop_Normal3bv;
1180 break;
1181 case GL_SHORT:
1182 opcode = X_GLrop_Normal3sv;
1183 break;
1184 case GL_INT:
1185 opcode = X_GLrop_Normal3iv;
1186 break;
1187 case GL_FLOAT:
1188 opcode = X_GLrop_Normal3fv;
1189 break;
1190 case GL_DOUBLE:
1191 opcode = X_GLrop_Normal3dv;
1192 break;
1193 default:
1194 __glXSetError(gc, GL_INVALID_ENUM);
1195 return;
1196 }
1197
1198 a = get_array_entry(arrays, GL_NORMAL_ARRAY, 0);
1199 assert(a != NULL);
1200 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 3, GL_TRUE, 4, opcode);
1201
1202 if (a->enabled) {
1203 arrays->array_info_cache_valid = GL_FALSE;
1204 }
1205 }
1206
1207
1208 void
1209 __indirect_glColorPointer(GLint size, GLenum type, GLsizei stride,
1210 const GLvoid * pointer)
1211 {
1212 static const uint16_t byte_ops[5] = {
1213 0, 0, 0, X_GLrop_Color3bv, X_GLrop_Color4bv
1214 };
1215 static const uint16_t ubyte_ops[5] = {
1216 0, 0, 0, X_GLrop_Color3ubv, X_GLrop_Color4ubv
1217 };
1218 static const uint16_t short_ops[5] = {
1219 0, 0, 0, X_GLrop_Color3sv, X_GLrop_Color4sv
1220 };
1221 static const uint16_t ushort_ops[5] = {
1222 0, 0, 0, X_GLrop_Color3usv, X_GLrop_Color4usv
1223 };
1224 static const uint16_t int_ops[5] = {
1225 0, 0, 0, X_GLrop_Color3iv, X_GLrop_Color4iv
1226 };
1227 static const uint16_t uint_ops[5] = {
1228 0, 0, 0, X_GLrop_Color3uiv, X_GLrop_Color4uiv
1229 };
1230 static const uint16_t float_ops[5] = {
1231 0, 0, 0, X_GLrop_Color3fv, X_GLrop_Color4fv
1232 };
1233 static const uint16_t double_ops[5] = {
1234 0, 0, 0, X_GLrop_Color3dv, X_GLrop_Color4dv
1235 };
1236 uint16_t opcode;
1237 __GLXcontext *gc = __glXGetCurrentContext();
1238 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1239 struct array_state_vector *arrays = state->array_state;
1240 struct array_state *a;
1241
1242
1243 if (size < 3 || size > 4 || stride < 0) {
1244 __glXSetError(gc, GL_INVALID_VALUE);
1245 return;
1246 }
1247
1248 switch (type) {
1249 case GL_BYTE:
1250 opcode = byte_ops[size];
1251 break;
1252 case GL_UNSIGNED_BYTE:
1253 opcode = ubyte_ops[size];
1254 break;
1255 case GL_SHORT:
1256 opcode = short_ops[size];
1257 break;
1258 case GL_UNSIGNED_SHORT:
1259 opcode = ushort_ops[size];
1260 break;
1261 case GL_INT:
1262 opcode = int_ops[size];
1263 break;
1264 case GL_UNSIGNED_INT:
1265 opcode = uint_ops[size];
1266 break;
1267 case GL_FLOAT:
1268 opcode = float_ops[size];
1269 break;
1270 case GL_DOUBLE:
1271 opcode = double_ops[size];
1272 break;
1273 default:
1274 __glXSetError(gc, GL_INVALID_ENUM);
1275 return;
1276 }
1277
1278 a = get_array_entry(arrays, GL_COLOR_ARRAY, 0);
1279 assert(a != NULL);
1280 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
1281
1282 if (a->enabled) {
1283 arrays->array_info_cache_valid = GL_FALSE;
1284 }
1285 }
1286
1287
1288 void
1289 __indirect_glIndexPointer(GLenum type, GLsizei stride, const GLvoid * pointer)
1290 {
1291 uint16_t opcode;
1292 __GLXcontext *gc = __glXGetCurrentContext();
1293 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1294 struct array_state_vector *arrays = state->array_state;
1295 struct array_state *a;
1296
1297
1298 if (stride < 0) {
1299 __glXSetError(gc, GL_INVALID_VALUE);
1300 return;
1301 }
1302
1303 switch (type) {
1304 case GL_UNSIGNED_BYTE:
1305 opcode = X_GLrop_Indexubv;
1306 break;
1307 case GL_SHORT:
1308 opcode = X_GLrop_Indexsv;
1309 break;
1310 case GL_INT:
1311 opcode = X_GLrop_Indexiv;
1312 break;
1313 case GL_FLOAT:
1314 opcode = X_GLrop_Indexfv;
1315 break;
1316 case GL_DOUBLE:
1317 opcode = X_GLrop_Indexdv;
1318 break;
1319 default:
1320 __glXSetError(gc, GL_INVALID_ENUM);
1321 return;
1322 }
1323
1324 a = get_array_entry(arrays, GL_INDEX_ARRAY, 0);
1325 assert(a != NULL);
1326 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
1327
1328 if (a->enabled) {
1329 arrays->array_info_cache_valid = GL_FALSE;
1330 }
1331 }
1332
1333
1334 void
1335 __indirect_glEdgeFlagPointer(GLsizei stride, const GLvoid * pointer)
1336 {
1337 __GLXcontext *gc = __glXGetCurrentContext();
1338 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1339 struct array_state_vector *arrays = state->array_state;
1340 struct array_state *a;
1341
1342
1343 if (stride < 0) {
1344 __glXSetError(gc, GL_INVALID_VALUE);
1345 return;
1346 }
1347
1348
1349 a = get_array_entry(arrays, GL_EDGE_FLAG_ARRAY, 0);
1350 assert(a != NULL);
1351 COMMON_ARRAY_DATA_INIT(a, pointer, GL_UNSIGNED_BYTE, stride, 1, GL_FALSE,
1352 4, X_GLrop_EdgeFlagv);
1353
1354 if (a->enabled) {
1355 arrays->array_info_cache_valid = GL_FALSE;
1356 }
1357 }
1358
1359
1360 void
1361 __indirect_glTexCoordPointer(GLint size, GLenum type, GLsizei stride,
1362 const GLvoid * pointer)
1363 {
1364 static const uint16_t short_ops[5] = {
1365 0, X_GLrop_TexCoord1sv, X_GLrop_TexCoord2sv, X_GLrop_TexCoord3sv,
1366 X_GLrop_TexCoord4sv
1367 };
1368 static const uint16_t int_ops[5] = {
1369 0, X_GLrop_TexCoord1iv, X_GLrop_TexCoord2iv, X_GLrop_TexCoord3iv,
1370 X_GLrop_TexCoord4iv
1371 };
1372 static const uint16_t float_ops[5] = {
1373 0, X_GLrop_TexCoord1fv, X_GLrop_TexCoord2fv, X_GLrop_TexCoord3fv,
1374 X_GLrop_TexCoord4fv
1375 };
1376 static const uint16_t double_ops[5] = {
1377 0, X_GLrop_TexCoord1dv, X_GLrop_TexCoord2dv, X_GLrop_TexCoord3dv,
1378 X_GLrop_TexCoord4dv
1379 };
1380
1381 static const uint16_t mshort_ops[5] = {
1382 0, X_GLrop_MultiTexCoord1svARB, X_GLrop_MultiTexCoord2svARB,
1383 X_GLrop_MultiTexCoord3svARB, X_GLrop_MultiTexCoord4svARB
1384 };
1385 static const uint16_t mint_ops[5] = {
1386 0, X_GLrop_MultiTexCoord1ivARB, X_GLrop_MultiTexCoord2ivARB,
1387 X_GLrop_MultiTexCoord3ivARB, X_GLrop_MultiTexCoord4ivARB
1388 };
1389 static const uint16_t mfloat_ops[5] = {
1390 0, X_GLrop_MultiTexCoord1fvARB, X_GLrop_MultiTexCoord2fvARB,
1391 X_GLrop_MultiTexCoord3fvARB, X_GLrop_MultiTexCoord4fvARB
1392 };
1393 static const uint16_t mdouble_ops[5] = {
1394 0, X_GLrop_MultiTexCoord1dvARB, X_GLrop_MultiTexCoord2dvARB,
1395 X_GLrop_MultiTexCoord3dvARB, X_GLrop_MultiTexCoord4dvARB
1396 };
1397
1398 uint16_t opcode;
1399 __GLXcontext *gc = __glXGetCurrentContext();
1400 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1401 struct array_state_vector *arrays = state->array_state;
1402 struct array_state *a;
1403 unsigned header_size;
1404 unsigned index;
1405
1406
1407 if (size < 1 || size > 4 || stride < 0) {
1408 __glXSetError(gc, GL_INVALID_VALUE);
1409 return;
1410 }
1411
1412 index = arrays->active_texture_unit;
1413 if (index == 0) {
1414 switch (type) {
1415 case GL_SHORT:
1416 opcode = short_ops[size];
1417 break;
1418 case GL_INT:
1419 opcode = int_ops[size];
1420 break;
1421 case GL_FLOAT:
1422 opcode = float_ops[size];
1423 break;
1424 case GL_DOUBLE:
1425 opcode = double_ops[size];
1426 break;
1427 default:
1428 __glXSetError(gc, GL_INVALID_ENUM);
1429 return;
1430 }
1431
1432 header_size = 4;
1433 }
1434 else {
1435 switch (type) {
1436 case GL_SHORT:
1437 opcode = mshort_ops[size];
1438 break;
1439 case GL_INT:
1440 opcode = mint_ops[size];
1441 break;
1442 case GL_FLOAT:
1443 opcode = mfloat_ops[size];
1444 break;
1445 case GL_DOUBLE:
1446 opcode = mdouble_ops[size];
1447 break;
1448 default:
1449 __glXSetError(gc, GL_INVALID_ENUM);
1450 return;
1451 }
1452
1453 header_size = 8;
1454 }
1455
1456 a = get_array_entry(arrays, GL_TEXTURE_COORD_ARRAY, index);
1457 assert(a != NULL);
1458 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE,
1459 header_size, opcode);
1460
1461 if (a->enabled) {
1462 arrays->array_info_cache_valid = GL_FALSE;
1463 }
1464 }
1465
1466
1467 void
1468 __indirect_glSecondaryColorPointerEXT(GLint size, GLenum type, GLsizei stride,
1469 const GLvoid * pointer)
1470 {
1471 uint16_t opcode;
1472 __GLXcontext *gc = __glXGetCurrentContext();
1473 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1474 struct array_state_vector *arrays = state->array_state;
1475 struct array_state *a;
1476
1477
1478 if (size != 3 || stride < 0) {
1479 __glXSetError(gc, GL_INVALID_VALUE);
1480 return;
1481 }
1482
1483 switch (type) {
1484 case GL_BYTE:
1485 opcode = 4126;
1486 break;
1487 case GL_UNSIGNED_BYTE:
1488 opcode = 4131;
1489 break;
1490 case GL_SHORT:
1491 opcode = 4127;
1492 break;
1493 case GL_UNSIGNED_SHORT:
1494 opcode = 4132;
1495 break;
1496 case GL_INT:
1497 opcode = 4128;
1498 break;
1499 case GL_UNSIGNED_INT:
1500 opcode = 4133;
1501 break;
1502 case GL_FLOAT:
1503 opcode = 4129;
1504 break;
1505 case GL_DOUBLE:
1506 opcode = 4130;
1507 break;
1508 default:
1509 __glXSetError(gc, GL_INVALID_ENUM);
1510 return;
1511 }
1512
1513 a = get_array_entry(arrays, GL_SECONDARY_COLOR_ARRAY, 0);
1514 if (a == NULL) {
1515 __glXSetError(gc, GL_INVALID_OPERATION);
1516 return;
1517 }
1518
1519 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
1520
1521 if (a->enabled) {
1522 arrays->array_info_cache_valid = GL_FALSE;
1523 }
1524 }
1525
1526
1527 void
1528 __indirect_glFogCoordPointerEXT(GLenum type, GLsizei stride,
1529 const GLvoid * pointer)
1530 {
1531 uint16_t opcode;
1532 __GLXcontext *gc = __glXGetCurrentContext();
1533 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1534 struct array_state_vector *arrays = state->array_state;
1535 struct array_state *a;
1536
1537
1538 if (stride < 0) {
1539 __glXSetError(gc, GL_INVALID_VALUE);
1540 return;
1541 }
1542
1543 switch (type) {
1544 case GL_FLOAT:
1545 opcode = 4124;
1546 break;
1547 case GL_DOUBLE:
1548 opcode = 4125;
1549 break;
1550 default:
1551 __glXSetError(gc, GL_INVALID_ENUM);
1552 return;
1553 }
1554
1555 a = get_array_entry(arrays, GL_FOG_COORD_ARRAY, 0);
1556 if (a == NULL) {
1557 __glXSetError(gc, GL_INVALID_OPERATION);
1558 return;
1559 }
1560
1561 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
1562
1563 if (a->enabled) {
1564 arrays->array_info_cache_valid = GL_FALSE;
1565 }
1566 }
1567
1568
1569 void
1570 __indirect_glVertexAttribPointerARB(GLuint index, GLint size,
1571 GLenum type, GLboolean normalized,
1572 GLsizei stride, const GLvoid * pointer)
1573 {
1574 static const uint16_t short_ops[5] = { 0, 4189, 4190, 4191, 4192 };
1575 static const uint16_t float_ops[5] = { 0, 4193, 4194, 4195, 4196 };
1576 static const uint16_t double_ops[5] = { 0, 4197, 4198, 4199, 4200 };
1577
1578 uint16_t opcode;
1579 __GLXcontext *gc = __glXGetCurrentContext();
1580 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1581 struct array_state_vector *arrays = state->array_state;
1582 struct array_state *a;
1583 unsigned true_immediate_count;
1584 unsigned true_immediate_size;
1585
1586
1587 if ((size < 1) || (size > 4) || (stride < 0)
1588 || (index > arrays->num_vertex_program_attribs)) {
1589 __glXSetError(gc, GL_INVALID_VALUE);
1590 return;
1591 }
1592
1593 if (normalized && (type != GL_FLOAT) && (type != GL_DOUBLE)) {
1594 switch (type) {
1595 case GL_BYTE:
1596 opcode = X_GLrop_VertexAttrib4NbvARB;
1597 break;
1598 case GL_UNSIGNED_BYTE:
1599 opcode = X_GLrop_VertexAttrib4NubvARB;
1600 break;
1601 case GL_SHORT:
1602 opcode = X_GLrop_VertexAttrib4NsvARB;
1603 break;
1604 case GL_UNSIGNED_SHORT:
1605 opcode = X_GLrop_VertexAttrib4NusvARB;
1606 break;
1607 case GL_INT:
1608 opcode = X_GLrop_VertexAttrib4NivARB;
1609 break;
1610 case GL_UNSIGNED_INT:
1611 opcode = X_GLrop_VertexAttrib4NuivARB;
1612 break;
1613 default:
1614 __glXSetError(gc, GL_INVALID_ENUM);
1615 return;
1616 }
1617
1618 true_immediate_count = 4;
1619 }
1620 else {
1621 true_immediate_count = size;
1622
1623 switch (type) {
1624 case GL_BYTE:
1625 opcode = X_GLrop_VertexAttrib4bvARB;
1626 true_immediate_count = 4;
1627 break;
1628 case GL_UNSIGNED_BYTE:
1629 opcode = X_GLrop_VertexAttrib4ubvARB;
1630 true_immediate_count = 4;
1631 break;
1632 case GL_SHORT:
1633 opcode = short_ops[size];
1634 break;
1635 case GL_UNSIGNED_SHORT:
1636 opcode = X_GLrop_VertexAttrib4usvARB;
1637 true_immediate_count = 4;
1638 break;
1639 case GL_INT:
1640 opcode = X_GLrop_VertexAttrib4ivARB;
1641 true_immediate_count = 4;
1642 break;
1643 case GL_UNSIGNED_INT:
1644 opcode = X_GLrop_VertexAttrib4uivARB;
1645 true_immediate_count = 4;
1646 break;
1647 case GL_FLOAT:
1648 opcode = float_ops[size];
1649 break;
1650 case GL_DOUBLE:
1651 opcode = double_ops[size];
1652 break;
1653 default:
1654 __glXSetError(gc, GL_INVALID_ENUM);
1655 return;
1656 }
1657 }
1658
1659 a = get_array_entry(arrays, GL_VERTEX_ATTRIB_ARRAY_POINTER, index);
1660 if (a == NULL) {
1661 __glXSetError(gc, GL_INVALID_OPERATION);
1662 return;
1663 }
1664
1665 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, normalized, 8,
1666 opcode);
1667
1668 true_immediate_size = __glXTypeSize(type) * true_immediate_count;
1669 ((uint16_t *) (a)->header)[0] = __GLX_PAD(a->header_size
1670 + true_immediate_size);
1671
1672 if (a->enabled) {
1673 arrays->array_info_cache_valid = GL_FALSE;
1674 }
1675 }
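/* Worked example of the header fix-up above, matching the comment in
 * emit_element_none(): for an illustrative call
 *
 *    glVertexAttribPointerARB(attrib, 2, GL_UNSIGNED_SHORT, GL_TRUE, 0, ptr)
 *
 * the closest immediate-mode opcode is X_GLrop_VertexAttrib4NusvARB, so
 * true_immediate_count is forced to 4 and the cached command length becomes
 * __GLX_PAD(8 + 4 * 2) = 16 bytes, even though only 2 * 2 = 4 bytes of user
 * data exist per element; emit_element_none() zero-fills the difference.
 */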
1676
1677
1678 /**
1679 * I don't have 100% confidence that this is correct. The different rules
1680 * about whether or not generic vertex attributes alias "classic" vertex
1681 * attributes (i.e., attrib1 ?= primary color) between ARB_vertex_program,
1682 * ARB_vertex_shader, and NV_vertex_program are a bit confusing. My
1683 * feeling is that the client-side doesn't have to worry about it. The
1684 * client just sends all the data to the server and lets the server deal
1685 * with it.
1686 */
1687 void
1688 __indirect_glVertexAttribPointerNV(GLuint index, GLint size,
1689 GLenum type, GLsizei stride,
1690 const GLvoid * pointer)
1691 {
1692 __GLXcontext *gc = __glXGetCurrentContext();
1693 GLboolean normalized = GL_FALSE;
1694
1695
1696 switch (type) {
1697 case GL_UNSIGNED_BYTE:
1698 if (size != 4) {
1699 __glXSetError(gc, GL_INVALID_VALUE);
1700 return;
1701 }
1702 normalized = GL_TRUE;
1703 /* FALLTHROUGH */
1704 case GL_SHORT:
1705 case GL_FLOAT:
1706 case GL_DOUBLE:
1707 __indirect_glVertexAttribPointerARB(index, size, type,
1708 normalized, stride, pointer);
1709 return;
1710 default:
1711 __glXSetError(gc, GL_INVALID_ENUM);
1712 return;
1713 }
1714 }
1715
1716
1717 void
1718 __indirect_glClientActiveTextureARB(GLenum texture)
1719 {
1720 __GLXcontext *const gc = __glXGetCurrentContext();
1721 __GLXattribute *const state =
1722 (__GLXattribute *) (gc->client_state_private);
1723 struct array_state_vector *const arrays = state->array_state;
1724 const GLint unit = (GLint) texture - GL_TEXTURE0;
1725
1726
1727 if ((unit < 0) || (unit >= arrays->num_texture_units)) {
1728 __glXSetError(gc, GL_INVALID_ENUM);
1729 return;
1730 }
1731
1732 arrays->active_texture_unit = unit;
1733 }
1734
1735
1736 /**
1737 * Modify the enable state for the selected array
1738 */
1739 GLboolean
1740 __glXSetArrayEnable(__GLXattribute * state, GLenum key, unsigned index,
1741 GLboolean enable)
1742 {
1743 struct array_state_vector *arrays = state->array_state;
1744 struct array_state *a;
1745
1746
1747 /* Texture coordinate arrays have an implicit index set when the
1748 * application calls glClientActiveTexture.
1749 */
1750 if (key == GL_TEXTURE_COORD_ARRAY) {
1751 index = arrays->active_texture_unit;
1752 }
1753
1754 a = get_array_entry(arrays, key, index);
1755
1756 if ((a != NULL) && (a->enabled != enable)) {
1757 a->enabled = enable;
1758 arrays->array_info_cache_valid = GL_FALSE;
1759 }
1760
1761 return (a != NULL);
1762 }
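/* Hypothetical usage sketch: the client-state enable/disable entry points
 * are expected to funnel into this helper, e.g. something like
 *
 *    __glXSetArrayEnable(state, GL_VERTEX_ARRAY, 0, GL_TRUE);
 *
 * and, after glClientActiveTexture(GL_TEXTURE1), enabling
 * GL_TEXTURE_COORD_ARRAY toggles the unit-1 entry because the index is
 * overridden with active_texture_unit above.
 */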
1763
1764
1765 void
1766 __glXArrayDisableAll(__GLXattribute * state)
1767 {
1768 struct array_state_vector *arrays = state->array_state;
1769 unsigned i;
1770
1771
1772 for (i = 0; i < arrays->num_arrays; i++) {
1773 arrays->arrays[i].enabled = GL_FALSE;
1774 }
1775
1776 arrays->array_info_cache_valid = GL_FALSE;
1777 }
1778
1779
1780 /** Get the enable state of the selected array, storing it in \c dest.
1781  * Returns \c GL_TRUE if the array exists, \c GL_FALSE otherwise. */
1782 GLboolean
1783 __glXGetArrayEnable(const __GLXattribute * const state,
1784 GLenum key, unsigned index, GLintptr * dest)
1785 {
1786 const struct array_state_vector *arrays = state->array_state;
1787 const struct array_state *a =
1788 get_array_entry((struct array_state_vector *) arrays,
1789 key, index);
1790
1791 if (a != NULL) {
1792 *dest = (GLintptr) a->enabled;
1793 }
1794
1795 return (a != NULL);
1796 }
1797
1798
1799 /** Get the data type of the selected array, storing it in \c dest.
1800  * Returns \c GL_TRUE if the array exists, \c GL_FALSE otherwise. */
1801 GLboolean
1802 __glXGetArrayType(const __GLXattribute * const state,
1803 GLenum key, unsigned index, GLintptr * dest)
1804 {
1805 const struct array_state_vector *arrays = state->array_state;
1806 const struct array_state *a =
1807 get_array_entry((struct array_state_vector *) arrays,
1808 key, index);
1809
1810 if (a != NULL) {
1811 *dest = (GLintptr) a->data_type;
1812 }
1813
1814 return (a != NULL);
1815 }
1816
1817
1818 /** Get the element count (size) of the selected array, storing it in
1819  * \c dest. Returns \c GL_TRUE if the array exists, \c GL_FALSE otherwise. */
1820 GLboolean
1821 __glXGetArraySize(const __GLXattribute * const state,
1822 GLenum key, unsigned index, GLintptr * dest)
1823 {
1824 const struct array_state_vector *arrays = state->array_state;
1825 const struct array_state *a =
1826 get_array_entry((struct array_state_vector *) arrays,
1827 key, index);
1828
1829 if (a != NULL) {
1830 *dest = (GLintptr) a->count;
1831 }
1832
1833 return (a != NULL);
1834 }
1835
1836
1837 /** Get the user-specified stride of the selected array, storing it in
1838  * \c dest. Returns \c GL_TRUE if the array exists, \c GL_FALSE otherwise. */
1839 GLboolean
1840 __glXGetArrayStride(const __GLXattribute * const state,
1841 GLenum key, unsigned index, GLintptr * dest)
1842 {
1843 const struct array_state_vector *arrays = state->array_state;
1844 const struct array_state *a =
1845 get_array_entry((struct array_state_vector *) arrays,
1846 key, index);
1847
1848 if (a != NULL) {
1849 *dest = (GLintptr) a->user_stride;
1850 }
1851
1852 return (a != NULL);
1853 }
1854
1855
1856 /** Get the data pointer of the selected array, storing it in \c dest.
1857  * Returns \c GL_TRUE if the array exists, \c GL_FALSE otherwise. */
1858 GLboolean
1859 __glXGetArrayPointer(const __GLXattribute * const state,
1860 GLenum key, unsigned index, void **dest)
1861 {
1862 const struct array_state_vector *arrays = state->array_state;
1863 const struct array_state *a =
1864 get_array_entry((struct array_state_vector *) arrays,
1865 key, index);
1866
1867
1868 if (a != NULL) {
1869 *dest = (void *) (a->data);
1870 }
1871
1872 return (a != NULL);
1873 }
1874
1875
1876 /** Get the normalized flag of the selected array, storing it in \c dest.
1877  * Returns \c GL_TRUE if the array exists, \c GL_FALSE otherwise. */
1878 GLboolean
1879 __glXGetArrayNormalized(const __GLXattribute * const state,
1880 GLenum key, unsigned index, GLintptr * dest)
1881 {
1882 const struct array_state_vector *arrays = state->array_state;
1883 const struct array_state *a =
1884 get_array_entry((struct array_state_vector *) arrays,
1885 key, index);
1886
1887
1888 if (a != NULL) {
1889 *dest = (GLintptr) a->normalized;
1890 }
1891
1892 return (a != NULL);
1893 }
1894
1895
1896 /** Get the client-side active texture unit. */
1898 GLuint
1899 __glXGetActiveTextureUnit(const __GLXattribute * const state)
1900 {
1901 return state->array_state->active_texture_unit;
1902 }
1903
1904
1905 void
1906 __glXPushArrayState(__GLXattribute * state)
1907 {
1908 struct array_state_vector *arrays = state->array_state;
1909 struct array_stack_state *stack =
1910 &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
1911 unsigned i;
1912
1913 /* XXX are we pushing _all_ the necessary fields? */
1914 for (i = 0; i < arrays->num_arrays; i++) {
1915 stack[i].data = arrays->arrays[i].data;
1916 stack[i].data_type = arrays->arrays[i].data_type;
1917 stack[i].user_stride = arrays->arrays[i].user_stride;
1918 stack[i].count = arrays->arrays[i].count;
1919 stack[i].key = arrays->arrays[i].key;
1920 stack[i].index = arrays->arrays[i].index;
1921 stack[i].enabled = arrays->arrays[i].enabled;
1922 }
1923
1924 arrays->active_texture_unit_stack[arrays->stack_index] =
1925 arrays->active_texture_unit;
1926
1927 arrays->stack_index++;
1928 }
1929
1930
1931 void
1932 __glXPopArrayState(__GLXattribute * state)
1933 {
1934 struct array_state_vector *arrays = state->array_state;
1935 struct array_stack_state *stack;
1936 unsigned i;
1937
1938
1939 arrays->stack_index--;
1940 stack = &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
1941
1942 for (i = 0; i < arrays->num_arrays; i++) {
1943 switch (stack[i].key) {
1944 case GL_NORMAL_ARRAY:
1945 __indirect_glNormalPointer(stack[i].data_type,
1946 stack[i].user_stride, stack[i].data);
1947 break;
1948 case GL_COLOR_ARRAY:
1949 __indirect_glColorPointer(stack[i].count,
1950 stack[i].data_type,
1951 stack[i].user_stride, stack[i].data);
1952 break;
1953 case GL_INDEX_ARRAY:
1954 __indirect_glIndexPointer(stack[i].data_type,
1955 stack[i].user_stride, stack[i].data);
1956 break;
1957 case GL_EDGE_FLAG_ARRAY:
1958 __indirect_glEdgeFlagPointer(stack[i].user_stride, stack[i].data);
1959 break;
1960 case GL_TEXTURE_COORD_ARRAY:
1961 arrays->active_texture_unit = stack[i].index;
1962 __indirect_glTexCoordPointer(stack[i].count,
1963 stack[i].data_type,
1964 stack[i].user_stride, stack[i].data);
1965 break;
1966 case GL_SECONDARY_COLOR_ARRAY:
1967 __indirect_glSecondaryColorPointerEXT(stack[i].count,
1968 stack[i].data_type,
1969 stack[i].user_stride,
1970 stack[i].data);
1971 break;
1972 case GL_FOG_COORDINATE_ARRAY:
1973 __indirect_glFogCoordPointerEXT(stack[i].data_type,
1974 stack[i].user_stride, stack[i].data);
1975 break;
1976
1977 }
1978
1979 __glXSetArrayEnable(state, stack[i].key, stack[i].index,
1980 stack[i].enabled);
1981 }
1982
1983 arrays->active_texture_unit =
1984 arrays->active_texture_unit_stack[arrays->stack_index];
1985 }