glx: Call __glXInitVertexArrayState() with a usable gc.
[mesa.git] / src / glx / indirect_glx.c
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Soft-
6 * ware"), to deal in the Software without restriction, including without
7 * limitation the rights to use, copy, modify, merge, publish, distribute,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, provided that the above copyright
10 * notice(s) and this permission notice appear in all copies of the Soft-
11 * ware and that both the above copyright notice(s) and this permission
12 * notice appear in supporting documentation.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 * ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY
17 * RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN
18 * THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSE-
19 * QUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
20 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
21 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFOR-
22 * MANCE OF THIS SOFTWARE.
23 *
24 * Except as contained in this notice, the name of a copyright holder shall
25 * not be used in advertising or otherwise to promote the sale, use or
26 * other dealings in this Software without prior written authorization of
27 * the copyright holder.
28 *
29 * Authors:
30 * Kristian Høgsberg (krh@bitplanet.net)
31 */
32
33 #include "glapi.h"
34 #include "glxclient.h"
35
36 #ifndef GLX_USE_APPLEGL
37
38 extern struct _glapi_table *__glXNewIndirectAPI(void);
39
40 /*
41 ** All indirect rendering contexts will share the same indirect dispatch table.
42 */
43 static struct _glapi_table *IndirectAPI = NULL;
44
/* Tear down an indirect rendering context: release the client-side vertex
 * array and attribute state, the cached GL strings, the rendering command
 * buffer, and finally the context record itself.  free(NULL) is a no-op,
 * so partially-initialized contexts are handled too.  The casts discard
 * const on the cached string pointers for free(). */
static void
indirect_destroy_context(struct glx_context *gc)
{
   __glXFreeVertexArrayState(gc);

   free((char *) gc->vendor);
   free((char *) gc->renderer);
   free((char *) gc->version);
   free((char *) gc->extensions);
   __glFreeAttributeState(gc);
   free((char *) gc->buf);
   free((char *) gc->client_state_private);
   /* gc must be freed last — everything above lives inside it. */
   free((char *) gc);
}
59
60 static Bool
61 SendMakeCurrentRequest(Display * dpy, CARD8 opcode,
62 GLXContextID gc_id, GLXContextTag gc_tag,
63 GLXDrawable draw, GLXDrawable read,
64 GLXContextTag *out_tag)
65 {
66 xGLXMakeCurrentReply reply;
67 Bool ret;
68
69 LockDisplay(dpy);
70
71 if (draw == read) {
72 xGLXMakeCurrentReq *req;
73
74 GetReq(GLXMakeCurrent, req);
75 req->reqType = opcode;
76 req->glxCode = X_GLXMakeCurrent;
77 req->drawable = draw;
78 req->context = gc_id;
79 req->oldContextTag = gc_tag;
80 }
81 else {
82 struct glx_display *priv = __glXInitialize(dpy);
83
84 /* If the server can support the GLX 1.3 version, we should
85 * perfer that. Not only that, some servers support GLX 1.3 but
86 * not the SGI extension.
87 */
88
89 if ((priv->majorVersion > 1) || (priv->minorVersion >= 3)) {
90 xGLXMakeContextCurrentReq *req;
91
92 GetReq(GLXMakeContextCurrent, req);
93 req->reqType = opcode;
94 req->glxCode = X_GLXMakeContextCurrent;
95 req->drawable = draw;
96 req->readdrawable = read;
97 req->context = gc_id;
98 req->oldContextTag = gc_tag;
99 }
100 else {
101 xGLXVendorPrivateWithReplyReq *vpreq;
102 xGLXMakeCurrentReadSGIReq *req;
103
104 GetReqExtra(GLXVendorPrivateWithReply,
105 sz_xGLXMakeCurrentReadSGIReq -
106 sz_xGLXVendorPrivateWithReplyReq, vpreq);
107 req = (xGLXMakeCurrentReadSGIReq *) vpreq;
108 req->reqType = opcode;
109 req->glxCode = X_GLXVendorPrivateWithReply;
110 req->vendorCode = X_GLXvop_MakeCurrentReadSGI;
111 req->drawable = draw;
112 req->readable = read;
113 req->context = gc_id;
114 req->oldContextTag = gc_tag;
115 }
116 }
117
118 ret = _XReply(dpy, (xReply *) &reply, 0, False);
119
120 if (out_tag)
121 *out_tag = reply.contextTag;
122
123 UnlockDisplay(dpy);
124 SyncHandle();
125
126 return ret;
127 }
128
129 static int
130 indirect_bind_context(struct glx_context *gc, struct glx_context *old,
131 GLXDrawable draw, GLXDrawable read)
132 {
133 GLXContextTag tag;
134 Display *dpy = gc->psc->dpy;
135 int opcode = __glXSetupForCommand(dpy);
136 Bool sent;
137
138 if (old != &dummyContext && !old->isDirect && old->psc->dpy == dpy) {
139 tag = old->currentContextTag;
140 old->currentContextTag = 0;
141 } else {
142 tag = 0;
143 }
144
145 sent = SendMakeCurrentRequest(dpy, opcode, gc->xid, tag, draw, read,
146 &gc->currentContextTag);
147
148 if (!IndirectAPI)
149 IndirectAPI = __glXNewIndirectAPI();
150 _glapi_set_dispatch(IndirectAPI);
151
152 return !sent;
153 }
154
155 static void
156 indirect_unbind_context(struct glx_context *gc, struct glx_context *new)
157 {
158 Display *dpy = gc->psc->dpy;
159 int opcode = __glXSetupForCommand(dpy);
160
161 if (gc == new)
162 return;
163
164 /* We are either switching to no context, away from an indirect
165 * context to a direct context or from one dpy to another and have
166 * to send a request to the dpy to unbind the previous context.
167 */
168 if (!new || new->isDirect || new->psc->dpy != dpy) {
169 SendMakeCurrentRequest(dpy, opcode, None,
170 gc->currentContextTag, None, None, NULL);
171 gc->currentContextTag = 0;
172 }
173 }
174
175 static void
176 indirect_wait_gl(struct glx_context *gc)
177 {
178 xGLXWaitGLReq *req;
179 Display *dpy = gc->currentDpy;
180
181 /* Flush any pending commands out */
182 __glXFlushRenderBuffer(gc, gc->pc);
183
184 /* Send the glXWaitGL request */
185 LockDisplay(dpy);
186 GetReq(GLXWaitGL, req);
187 req->reqType = gc->majorOpcode;
188 req->glxCode = X_GLXWaitGL;
189 req->contextTag = gc->currentContextTag;
190 UnlockDisplay(dpy);
191 SyncHandle();
192 }
193
194 static void
195 indirect_wait_x(struct glx_context *gc)
196 {
197 xGLXWaitXReq *req;
198 Display *dpy = gc->currentDpy;
199
200 /* Flush any pending commands out */
201 __glXFlushRenderBuffer(gc, gc->pc);
202
203 LockDisplay(dpy);
204 GetReq(GLXWaitX, req);
205 req->reqType = gc->majorOpcode;
206 req->glxCode = X_GLXWaitX;
207 req->contextTag = gc->currentContextTag;
208 UnlockDisplay(dpy);
209 SyncHandle();
210 }
211
212 static void
213 indirect_use_x_font(struct glx_context *gc,
214 Font font, int first, int count, int listBase)
215 {
216 xGLXUseXFontReq *req;
217 Display *dpy = gc->currentDpy;
218
219 /* Flush any pending commands out */
220 __glXFlushRenderBuffer(gc, gc->pc);
221
222 /* Send the glXUseFont request */
223 LockDisplay(dpy);
224 GetReq(GLXUseXFont, req);
225 req->reqType = gc->majorOpcode;
226 req->glxCode = X_GLXUseXFont;
227 req->contextTag = gc->currentContextTag;
228 req->font = font;
229 req->first = first;
230 req->count = count;
231 req->listBase = listBase;
232 UnlockDisplay(dpy);
233 SyncHandle();
234 }
235
236 static void
237 indirect_bind_tex_image(Display * dpy,
238 GLXDrawable drawable,
239 int buffer, const int *attrib_list)
240 {
241 xGLXVendorPrivateReq *req;
242 struct glx_context *gc = __glXGetCurrentContext();
243 CARD32 *drawable_ptr;
244 INT32 *buffer_ptr;
245 CARD32 *num_attrib_ptr;
246 CARD32 *attrib_ptr;
247 CARD8 opcode;
248 unsigned int i;
249
250 i = 0;
251 if (attrib_list) {
252 while (attrib_list[i * 2] != None)
253 i++;
254 }
255
256 opcode = __glXSetupForCommand(dpy);
257 if (!opcode)
258 return;
259
260 LockDisplay(dpy);
261 GetReqExtra(GLXVendorPrivate, 12 + 8 * i, req);
262 req->reqType = opcode;
263 req->glxCode = X_GLXVendorPrivate;
264 req->vendorCode = X_GLXvop_BindTexImageEXT;
265 req->contextTag = gc->currentContextTag;
266
267 drawable_ptr = (CARD32 *) (req + 1);
268 buffer_ptr = (INT32 *) (drawable_ptr + 1);
269 num_attrib_ptr = (CARD32 *) (buffer_ptr + 1);
270 attrib_ptr = (CARD32 *) (num_attrib_ptr + 1);
271
272 *drawable_ptr = drawable;
273 *buffer_ptr = buffer;
274 *num_attrib_ptr = (CARD32) i;
275
276 i = 0;
277 if (attrib_list) {
278 while (attrib_list[i * 2] != None) {
279 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 0];
280 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 1];
281 i++;
282 }
283 }
284
285 UnlockDisplay(dpy);
286 SyncHandle();
287 }
288
289 static void
290 indirect_release_tex_image(Display * dpy, GLXDrawable drawable, int buffer)
291 {
292 xGLXVendorPrivateReq *req;
293 struct glx_context *gc = __glXGetCurrentContext();
294 CARD32 *drawable_ptr;
295 INT32 *buffer_ptr;
296 CARD8 opcode;
297
298 opcode = __glXSetupForCommand(dpy);
299 if (!opcode)
300 return;
301
302 LockDisplay(dpy);
303 GetReqExtra(GLXVendorPrivate, sizeof(CARD32) + sizeof(INT32), req);
304 req->reqType = opcode;
305 req->glxCode = X_GLXVendorPrivate;
306 req->vendorCode = X_GLXvop_ReleaseTexImageEXT;
307 req->contextTag = gc->currentContextTag;
308
309 drawable_ptr = (CARD32 *) (req + 1);
310 buffer_ptr = (INT32 *) (drawable_ptr + 1);
311
312 *drawable_ptr = drawable;
313 *buffer_ptr = buffer;
314
315 UnlockDisplay(dpy);
316 SyncHandle();
317 }
318
/* Dispatch table wiring the generic glx_context operations to the
 * indirect (wire-protocol) implementations above.  get_proc_address is
 * NULL — no context-private entry points are resolved for indirect
 * contexts here. */
static const struct glx_context_vtable indirect_context_vtable = {
   .destroy             = indirect_destroy_context,
   .bind                = indirect_bind_context,
   .unbind              = indirect_unbind_context,
   .wait_gl             = indirect_wait_gl,
   .wait_x              = indirect_wait_x,
   .use_x_font          = indirect_use_x_font,
   .bind_tex_image      = indirect_bind_tex_image,
   .release_tex_image   = indirect_release_tex_image,
   .get_proc_address    = NULL,
};
330
331 /**
332 * \todo Eliminate \c __glXInitVertexArrayState. Replace it with a new
333 * function called \c __glXAllocateClientState that allocates the memory and
334 * does all the initialization (including the pixel pack / unpack).
335 *
336 * \note
337 * This function is \b not the place to validate the context creation
338 * parameters. It is just the allocator for the \c glx_context.
339 */
340 _X_HIDDEN struct glx_context *
341 indirect_create_context(struct glx_screen *psc,
342 struct glx_config *mode,
343 struct glx_context *shareList, int renderType)
344 {
345 struct glx_context *gc;
346 int bufSize;
347 CARD8 opcode;
348 __GLXattribute *state;
349
350 opcode = __glXSetupForCommand(psc->dpy);
351 if (!opcode) {
352 return NULL;
353 }
354
355 /* Allocate our context record */
356 gc = calloc(1, sizeof *gc);
357 if (!gc) {
358 /* Out of memory */
359 return NULL;
360 }
361
362 glx_context_init(gc, psc, mode);
363 gc->isDirect = GL_FALSE;
364 gc->vtable = &indirect_context_vtable;
365 state = calloc(1, sizeof(struct __GLXattributeRec));
366 gc->renderType = renderType;
367
368 if (state == NULL) {
369 /* Out of memory */
370 free(gc);
371 return NULL;
372 }
373 gc->client_state_private = state;
374 state->NoDrawArraysProtocol = (getenv("LIBGL_NO_DRAWARRAYS") != NULL);
375
376 /*
377 ** Create a temporary buffer to hold GLX rendering commands. The size
378 ** of the buffer is selected so that the maximum number of GLX rendering
379 ** commands can fit in a single X packet and still have room in the X
380 ** packet for the GLXRenderReq header.
381 */
382
383 bufSize = (XMaxRequestSize(psc->dpy) * 4) - sz_xGLXRenderReq;
384 gc->buf = malloc(bufSize);
385 if (!gc->buf) {
386 free(gc->client_state_private);
387 free(gc);
388 return NULL;
389 }
390 gc->bufSize = bufSize;
391
392 /* Fill in the new context */
393 gc->renderMode = GL_RENDER;
394
395 state->storePack.alignment = 4;
396 state->storeUnpack.alignment = 4;
397
398 gc->attributes.stackPointer = &gc->attributes.stack[0];
399
400 /*
401 ** PERFORMANCE NOTE: A mode dependent fill image can speed things up.
402 */
403 gc->fillImage = __glFillImage;
404 gc->pc = gc->buf;
405 gc->bufEnd = gc->buf + bufSize;
406 gc->isDirect = GL_FALSE;
407 if (__glXDebug) {
408 /*
409 ** Set limit register so that there will be one command per packet
410 */
411 gc->limit = gc->buf;
412 }
413 else {
414 gc->limit = gc->buf + bufSize - __GLX_BUFFER_LIMIT_SIZE;
415 }
416 gc->majorOpcode = opcode;
417
418 /*
419 ** Constrain the maximum drawing command size allowed to be
420 ** transfered using the X_GLXRender protocol request. First
421 ** constrain by a software limit, then constrain by the protocl
422 ** limit.
423 */
424 if (bufSize > __GLX_RENDER_CMD_SIZE_LIMIT) {
425 bufSize = __GLX_RENDER_CMD_SIZE_LIMIT;
426 }
427 if (bufSize > __GLX_MAX_RENDER_CMD_SIZE) {
428 bufSize = __GLX_MAX_RENDER_CMD_SIZE;
429 }
430 gc->maxSmallRenderCommandSize = bufSize;
431
432
433 return gc;
434 }
435
436 _X_HIDDEN struct glx_context *
437 indirect_create_context_attribs(struct glx_screen *base,
438 struct glx_config *config_base,
439 struct glx_context *shareList,
440 unsigned num_attribs,
441 const uint32_t *attribs,
442 unsigned *error)
443 {
444 int renderType = GLX_RGBA_TYPE;
445 unsigned i;
446
447 /* The error parameter is only used on the server so that correct GLX
448 * protocol errors can be generated. On the client, it can be ignored.
449 */
450 (void) error;
451
452 /* All of the attribute validation for indirect contexts is handled on the
453 * server, so there's not much to do here. Still, we need to parse the
454 * attributes to correctly set renderType.
455 */
456 for (i = 0; i < num_attribs; i++) {
457 if (attribs[i * 2] == GLX_RENDER_TYPE)
458 renderType = attribs[i * 2 + 1];
459 }
460
461 return indirect_create_context(base, config_base, shareList, renderType);
462 }
463
/* Screen-level dispatch for indirect rendering.  The query_renderer_*
 * hooks are NULL: GLX_MESA_query_renderer is not supported for indirect
 * contexts. */
static const struct glx_screen_vtable indirect_screen_vtable = {
   .create_context         = indirect_create_context,
   .create_context_attribs = indirect_create_context_attribs,
   .query_renderer_integer = NULL,
   .query_renderer_string  = NULL,
};
470
471 _X_HIDDEN struct glx_screen *
472 indirect_create_screen(int screen, struct glx_display * priv)
473 {
474 struct glx_screen *psc;
475
476 psc = calloc(1, sizeof *psc);
477 if (psc == NULL)
478 return NULL;
479
480 glx_screen_init(psc, screen, priv);
481 psc->vtable = &indirect_screen_vtable;
482
483 return psc;
484 }
485
486 #endif