glx/indirect: explicitly assign struct components for glx_*_vtable
[mesa.git] / src / glx / indirect_glx.c
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Soft-
6 * ware"), to deal in the Software without restriction, including without
7 * limitation the rights to use, copy, modify, merge, publish, distribute,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, provided that the above copyright
10 * notice(s) and this permission notice appear in all copies of the Soft-
11 * ware and that both the above copyright notice(s) and this permission
12 * notice appear in supporting documentation.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 * ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY
17 * RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN
18 * THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSE-
19 * QUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
20 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
21 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFOR-
22 * MANCE OF THIS SOFTWARE.
23 *
24 * Except as contained in this notice, the name of a copyright holder shall
25 * not be used in advertising or otherwise to promote the sale, use or
26 * other dealings in this Software without prior written authorization of
27 * the copyright holder.
28 *
29 * Authors:
30 * Kristian Høgsberg (krh@bitplanet.net)
31 */
32
33 #include "glapi.h"
34 #include "glxclient.h"
35
36 extern struct _glapi_table *__glXNewIndirectAPI(void);
37
38 /*
39 ** All indirect rendering contexts will share the same indirect dispatch table.
40 */
41 static struct _glapi_table *IndirectAPI = NULL;
42
43 static void
44 indirect_destroy_context(struct glx_context *gc)
45 {
46 __glXFreeVertexArrayState(gc);
47
48 free((char *) gc->vendor);
49 free((char *) gc->renderer);
50 free((char *) gc->version);
51 free((char *) gc->extensions);
52 __glFreeAttributeState(gc);
53 free((char *) gc->buf);
54 free((char *) gc->client_state_private);
55 free((char *) gc);
56 }
57
58 static Bool
59 SendMakeCurrentRequest(Display * dpy, CARD8 opcode,
60 GLXContextID gc_id, GLXContextTag gc_tag,
61 GLXDrawable draw, GLXDrawable read,
62 GLXContextTag *out_tag)
63 {
64 xGLXMakeCurrentReply reply;
65 Bool ret;
66
67 LockDisplay(dpy);
68
69 if (draw == read) {
70 xGLXMakeCurrentReq *req;
71
72 GetReq(GLXMakeCurrent, req);
73 req->reqType = opcode;
74 req->glxCode = X_GLXMakeCurrent;
75 req->drawable = draw;
76 req->context = gc_id;
77 req->oldContextTag = gc_tag;
78 }
79 else {
80 struct glx_display *priv = __glXInitialize(dpy);
81
82 /* If the server can support the GLX 1.3 version, we should
83 * perfer that. Not only that, some servers support GLX 1.3 but
84 * not the SGI extension.
85 */
86
87 if ((priv->majorVersion > 1) || (priv->minorVersion >= 3)) {
88 xGLXMakeContextCurrentReq *req;
89
90 GetReq(GLXMakeContextCurrent, req);
91 req->reqType = opcode;
92 req->glxCode = X_GLXMakeContextCurrent;
93 req->drawable = draw;
94 req->readdrawable = read;
95 req->context = gc_id;
96 req->oldContextTag = gc_tag;
97 }
98 else {
99 xGLXVendorPrivateWithReplyReq *vpreq;
100 xGLXMakeCurrentReadSGIReq *req;
101
102 GetReqExtra(GLXVendorPrivateWithReply,
103 sz_xGLXMakeCurrentReadSGIReq -
104 sz_xGLXVendorPrivateWithReplyReq, vpreq);
105 req = (xGLXMakeCurrentReadSGIReq *) vpreq;
106 req->reqType = opcode;
107 req->glxCode = X_GLXVendorPrivateWithReply;
108 req->vendorCode = X_GLXvop_MakeCurrentReadSGI;
109 req->drawable = draw;
110 req->readable = read;
111 req->context = gc_id;
112 req->oldContextTag = gc_tag;
113 }
114 }
115
116 ret = _XReply(dpy, (xReply *) &reply, 0, False);
117
118 if (out_tag)
119 *out_tag = reply.contextTag;
120
121 UnlockDisplay(dpy);
122 SyncHandle();
123
124 return ret;
125 }
126
/* Make an indirect context current: issue the MakeCurrent request, point
 * the GL dispatch at the shared indirect table, and lazily set up the
 * client-side vertex array state.  Returns 0 on success, non-zero when
 * the MakeCurrent request failed.
 */
static int
indirect_bind_context(struct glx_context *gc, struct glx_context *old,
                      GLXDrawable draw, GLXDrawable read)
{
   GLXContextTag tag;
   __GLXattribute *state;
   Display *dpy = gc->psc->dpy;
   int opcode = __glXSetupForCommand(dpy);
   Bool sent;

   /* If the previously current context is an indirect context on the
    * same display, hand its tag to the server so it is released as part
    * of this same MakeCurrent request, and clear it locally.
    */
   if (old != &dummyContext && !old->isDirect && old->psc->dpy == dpy) {
      tag = old->currentContextTag;
      old->currentContextTag = 0;
   } else {
      tag = 0;
   }

   sent = SendMakeCurrentRequest(dpy, opcode, gc->xid, tag, draw, read,
                                 &gc->currentContextTag);

   /* All indirect contexts share one dispatch table (see IndirectAPI
    * above); create it on first use.
    */
   if (!IndirectAPI)
      IndirectAPI = __glXNewIndirectAPI();
   _glapi_set_dispatch(IndirectAPI);

   /* First time this context becomes current: cache the server's
    * extension and version strings, then initialize the vertex array
    * state, which depends on them.
    */
   state = gc->client_state_private;
   if (state->array_state == NULL) {
      glGetString(GL_EXTENSIONS);
      glGetString(GL_VERSION);
      __glXInitVertexArrayState(gc);
   }

   return !sent;
}
160
161 static void
162 indirect_unbind_context(struct glx_context *gc, struct glx_context *new)
163 {
164 Display *dpy = gc->psc->dpy;
165 int opcode = __glXSetupForCommand(dpy);
166
167 if (gc == new)
168 return;
169
170 /* We are either switching to no context, away from a indirect
171 * context to a direct context or from one dpy to another and have
172 * to send a request to the dpy to unbind the previous context.
173 */
174 if (!new || new->isDirect || new->psc->dpy != dpy) {
175 SendMakeCurrentRequest(dpy, opcode, None,
176 gc->currentContextTag, None, None, NULL);
177 gc->currentContextTag = 0;
178 }
179 }
180
181 static void
182 indirect_wait_gl(struct glx_context *gc)
183 {
184 xGLXWaitGLReq *req;
185 Display *dpy = gc->currentDpy;
186
187 /* Flush any pending commands out */
188 __glXFlushRenderBuffer(gc, gc->pc);
189
190 /* Send the glXWaitGL request */
191 LockDisplay(dpy);
192 GetReq(GLXWaitGL, req);
193 req->reqType = gc->majorOpcode;
194 req->glxCode = X_GLXWaitGL;
195 req->contextTag = gc->currentContextTag;
196 UnlockDisplay(dpy);
197 SyncHandle();
198 }
199
200 static void
201 indirect_wait_x(struct glx_context *gc)
202 {
203 xGLXWaitXReq *req;
204 Display *dpy = gc->currentDpy;
205
206 /* Flush any pending commands out */
207 __glXFlushRenderBuffer(gc, gc->pc);
208
209 LockDisplay(dpy);
210 GetReq(GLXWaitX, req);
211 req->reqType = gc->majorOpcode;
212 req->glxCode = X_GLXWaitX;
213 req->contextTag = gc->currentContextTag;
214 UnlockDisplay(dpy);
215 SyncHandle();
216 }
217
218 static void
219 indirect_use_x_font(struct glx_context *gc,
220 Font font, int first, int count, int listBase)
221 {
222 xGLXUseXFontReq *req;
223 Display *dpy = gc->currentDpy;
224
225 /* Flush any pending commands out */
226 __glXFlushRenderBuffer(gc, gc->pc);
227
228 /* Send the glXUseFont request */
229 LockDisplay(dpy);
230 GetReq(GLXUseXFont, req);
231 req->reqType = gc->majorOpcode;
232 req->glxCode = X_GLXUseXFont;
233 req->contextTag = gc->currentContextTag;
234 req->font = font;
235 req->first = first;
236 req->count = count;
237 req->listBase = listBase;
238 UnlockDisplay(dpy);
239 SyncHandle();
240 }
241
242 static void
243 indirect_bind_tex_image(Display * dpy,
244 GLXDrawable drawable,
245 int buffer, const int *attrib_list)
246 {
247 xGLXVendorPrivateReq *req;
248 struct glx_context *gc = __glXGetCurrentContext();
249 CARD32 *drawable_ptr;
250 INT32 *buffer_ptr;
251 CARD32 *num_attrib_ptr;
252 CARD32 *attrib_ptr;
253 CARD8 opcode;
254 unsigned int i;
255
256 i = 0;
257 if (attrib_list) {
258 while (attrib_list[i * 2] != None)
259 i++;
260 }
261
262 opcode = __glXSetupForCommand(dpy);
263 if (!opcode)
264 return;
265
266 LockDisplay(dpy);
267 GetReqExtra(GLXVendorPrivate, 12 + 8 * i, req);
268 req->reqType = opcode;
269 req->glxCode = X_GLXVendorPrivate;
270 req->vendorCode = X_GLXvop_BindTexImageEXT;
271 req->contextTag = gc->currentContextTag;
272
273 drawable_ptr = (CARD32 *) (req + 1);
274 buffer_ptr = (INT32 *) (drawable_ptr + 1);
275 num_attrib_ptr = (CARD32 *) (buffer_ptr + 1);
276 attrib_ptr = (CARD32 *) (num_attrib_ptr + 1);
277
278 *drawable_ptr = drawable;
279 *buffer_ptr = buffer;
280 *num_attrib_ptr = (CARD32) i;
281
282 i = 0;
283 if (attrib_list) {
284 while (attrib_list[i * 2] != None) {
285 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 0];
286 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 1];
287 i++;
288 }
289 }
290
291 UnlockDisplay(dpy);
292 SyncHandle();
293 }
294
295 static void
296 indirect_release_tex_image(Display * dpy, GLXDrawable drawable, int buffer)
297 {
298 xGLXVendorPrivateReq *req;
299 struct glx_context *gc = __glXGetCurrentContext();
300 CARD32 *drawable_ptr;
301 INT32 *buffer_ptr;
302 CARD8 opcode;
303
304 opcode = __glXSetupForCommand(dpy);
305 if (!opcode)
306 return;
307
308 LockDisplay(dpy);
309 GetReqExtra(GLXVendorPrivate, sizeof(CARD32) + sizeof(INT32), req);
310 req->reqType = opcode;
311 req->glxCode = X_GLXVendorPrivate;
312 req->vendorCode = X_GLXvop_ReleaseTexImageEXT;
313 req->contextTag = gc->currentContextTag;
314
315 drawable_ptr = (CARD32 *) (req + 1);
316 buffer_ptr = (INT32 *) (drawable_ptr + 1);
317
318 *drawable_ptr = drawable;
319 *buffer_ptr = buffer;
320
321 UnlockDisplay(dpy);
322 SyncHandle();
323 }
324
/* Method table shared by all indirect rendering contexts.  Entries left
 * NULL are optional operations indirect contexts do not provide.
 */
static const struct glx_context_vtable indirect_context_vtable = {
   .destroy             = indirect_destroy_context,
   .bind                = indirect_bind_context,
   .unbind              = indirect_unbind_context,
   .wait_gl             = indirect_wait_gl,
   .wait_x              = indirect_wait_x,
   .use_x_font          = indirect_use_x_font,
   .bind_tex_image      = indirect_bind_tex_image,
   .release_tex_image   = indirect_release_tex_image,
   .get_proc_address    = NULL,
};
336
337 /**
338 * \todo Eliminate \c __glXInitVertexArrayState. Replace it with a new
339 * function called \c __glXAllocateClientState that allocates the memory and
340 * does all the initialization (including the pixel pack / unpack).
341 *
342 * \note
343 * This function is \b not the place to validate the context creation
344 * parameters. It is just the allocator for the \c glx_context.
345 */
346 _X_HIDDEN struct glx_context *
347 indirect_create_context(struct glx_screen *psc,
348 struct glx_config *mode,
349 struct glx_context *shareList, int renderType)
350 {
351 struct glx_context *gc;
352 int bufSize;
353 CARD8 opcode;
354 __GLXattribute *state;
355
356 opcode = __glXSetupForCommand(psc->dpy);
357 if (!opcode) {
358 return NULL;
359 }
360
361 /* Allocate our context record */
362 gc = calloc(1, sizeof *gc);
363 if (!gc) {
364 /* Out of memory */
365 return NULL;
366 }
367
368 glx_context_init(gc, psc, mode);
369 gc->isDirect = GL_FALSE;
370 gc->vtable = &indirect_context_vtable;
371 state = calloc(1, sizeof(struct __GLXattributeRec));
372 gc->renderType = renderType;
373
374 if (state == NULL) {
375 /* Out of memory */
376 free(gc);
377 return NULL;
378 }
379 gc->client_state_private = state;
380 state->NoDrawArraysProtocol = (getenv("LIBGL_NO_DRAWARRAYS") != NULL);
381
382 /*
383 ** Create a temporary buffer to hold GLX rendering commands. The size
384 ** of the buffer is selected so that the maximum number of GLX rendering
385 ** commands can fit in a single X packet and still have room in the X
386 ** packet for the GLXRenderReq header.
387 */
388
389 bufSize = (XMaxRequestSize(psc->dpy) * 4) - sz_xGLXRenderReq;
390 gc->buf = malloc(bufSize);
391 if (!gc->buf) {
392 free(gc->client_state_private);
393 free(gc);
394 return NULL;
395 }
396 gc->bufSize = bufSize;
397
398 /* Fill in the new context */
399 gc->renderMode = GL_RENDER;
400
401 state->storePack.alignment = 4;
402 state->storeUnpack.alignment = 4;
403
404 gc->attributes.stackPointer = &gc->attributes.stack[0];
405
406 /*
407 ** PERFORMANCE NOTE: A mode dependent fill image can speed things up.
408 */
409 gc->fillImage = __glFillImage;
410 gc->pc = gc->buf;
411 gc->bufEnd = gc->buf + bufSize;
412 gc->isDirect = GL_FALSE;
413 if (__glXDebug) {
414 /*
415 ** Set limit register so that there will be one command per packet
416 */
417 gc->limit = gc->buf;
418 }
419 else {
420 gc->limit = gc->buf + bufSize - __GLX_BUFFER_LIMIT_SIZE;
421 }
422 gc->majorOpcode = opcode;
423
424 /*
425 ** Constrain the maximum drawing command size allowed to be
426 ** transfered using the X_GLXRender protocol request. First
427 ** constrain by a software limit, then constrain by the protocl
428 ** limit.
429 */
430 if (bufSize > __GLX_RENDER_CMD_SIZE_LIMIT) {
431 bufSize = __GLX_RENDER_CMD_SIZE_LIMIT;
432 }
433 if (bufSize > __GLX_MAX_RENDER_CMD_SIZE) {
434 bufSize = __GLX_MAX_RENDER_CMD_SIZE;
435 }
436 gc->maxSmallRenderCommandSize = bufSize;
437
438
439 return gc;
440 }
441
442 _X_HIDDEN struct glx_context *
443 indirect_create_context_attribs(struct glx_screen *base,
444 struct glx_config *config_base,
445 struct glx_context *shareList,
446 unsigned num_attribs,
447 const uint32_t *attribs,
448 unsigned *error)
449 {
450 int renderType = GLX_RGBA_TYPE;
451 unsigned i;
452
453 /* The error parameter is only used on the server so that correct GLX
454 * protocol errors can be generated. On the client, it can be ignored.
455 */
456 (void) error;
457
458 /* All of the attribute validation for indirect contexts is handled on the
459 * server, so there's not much to do here. Still, we need to parse the
460 * attributes to correctly set renderType.
461 */
462 for (i = 0; i < num_attribs; i++) {
463 if (attribs[i * 2] == GLX_RENDER_TYPE)
464 renderType = attribs[i * 2 + 1];
465 }
466
467 return indirect_create_context(base, config_base, shareList, renderType);
468 }
469
/* Screen-level methods for indirect rendering; the renderer queries are
 * unsupported and left NULL.
 */
static const struct glx_screen_vtable indirect_screen_vtable = {
   .create_context = indirect_create_context,
   .create_context_attribs = indirect_create_context_attribs,
   .query_renderer_integer = NULL,
   .query_renderer_string = NULL,
};
476
477 _X_HIDDEN struct glx_screen *
478 indirect_create_screen(int screen, struct glx_display * priv)
479 {
480 struct glx_screen *psc;
481
482 psc = calloc(1, sizeof *psc);
483 if (psc == NULL)
484 return NULL;
485
486 glx_screen_init(psc, screen, priv);
487 psc->vtable = &indirect_screen_vtable;
488
489 return psc;
490 }