glx: Store the RENDER_TYPE in indirect rendering
mesa.git: src/glx/indirect_glx.c
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Soft-
 * ware"), to deal in the Software without restriction, including without
 * limitation the rights to use, copy, modify, merge, publish, distribute,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, provided that the above copyright
 * notice(s) and this permission notice appear in all copies of the Soft-
 * ware and that both the above copyright notice(s) and this permission
 * notice appear in supporting documentation.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
 * ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY
 * RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN
 * THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSE-
 * QUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFOR-
 * MANCE OF THIS SOFTWARE.
 *
 * Except as contained in this notice, the name of a copyright holder shall
 * not be used in advertising or otherwise to promote the sale, use or
 * other dealings in this Software without prior written authorization of
 * the copyright holder.
 *
 * Authors:
 *   Kristian Høgsberg (krh@bitplanet.net)
 */

#include "glapi.h"
#include "glxclient.h"

extern struct _glapi_table *__glXNewIndirectAPI(void);

/*
** All indirect rendering contexts will share the same indirect dispatch table.
*/
static struct _glapi_table *IndirectAPI = NULL;

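/* Free all client-side storage attached to an indirect context: vertex
 * array and attribute state, the cached GL strings, the protocol buffer,
 * and the context record itself.
 */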
static void
indirect_destroy_context(struct glx_context *gc)
{
   __glXFreeVertexArrayState(gc);

   free((char *) gc->vendor);
   free((char *) gc->renderer);
   free((char *) gc->version);
   free((char *) gc->extensions);
   __glFreeAttributeState(gc);
   free((char *) gc->buf);
   free((char *) gc->client_state_private);
   free((char *) gc);
}

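/* Issue the protocol request that binds a context on the server.  The core
 * GLXMakeCurrent request is used when draw and read are the same drawable;
 * otherwise GLXMakeContextCurrent (GLX 1.3) or the MakeCurrentReadSGI
 * vendor request is used, depending on what the server supports.
 */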
static Bool
SendMakeCurrentRequest(Display * dpy, CARD8 opcode,
                       GLXContextID gc_id, GLXContextTag gc_tag,
                       GLXDrawable draw, GLXDrawable read,
                       xGLXMakeCurrentReply * reply)
{
   Bool ret;

   LockDisplay(dpy);

   if (draw == read) {
      xGLXMakeCurrentReq *req;

      GetReq(GLXMakeCurrent, req);
      req->reqType = opcode;
      req->glxCode = X_GLXMakeCurrent;
      req->drawable = draw;
      req->context = gc_id;
      req->oldContextTag = gc_tag;
   }
   else {
      struct glx_display *priv = __glXInitialize(dpy);

      /* If the server supports GLX 1.3, we should prefer the core
       * request.  Some servers support GLX 1.3 but not the SGI
       * extension.
       */

      if ((priv->majorVersion > 1) || (priv->minorVersion >= 3)) {
         xGLXMakeContextCurrentReq *req;

         GetReq(GLXMakeContextCurrent, req);
         req->reqType = opcode;
         req->glxCode = X_GLXMakeContextCurrent;
         req->drawable = draw;
         req->readdrawable = read;
         req->context = gc_id;
         req->oldContextTag = gc_tag;
      }
      else {
         xGLXVendorPrivateWithReplyReq *vpreq;
         xGLXMakeCurrentReadSGIReq *req;

         GetReqExtra(GLXVendorPrivateWithReply,
                     sz_xGLXMakeCurrentReadSGIReq -
                     sz_xGLXVendorPrivateWithReplyReq, vpreq);
         req = (xGLXMakeCurrentReadSGIReq *) vpreq;
         req->reqType = opcode;
         req->glxCode = X_GLXVendorPrivateWithReply;
         req->vendorCode = X_GLXvop_MakeCurrentReadSGI;
         req->drawable = draw;
         req->readable = read;
         req->context = gc_id;
         req->oldContextTag = gc_tag;
      }
   }

   ret = _XReply(dpy, (xReply *) reply, 0, False);

   UnlockDisplay(dpy);
   SyncHandle();

   return ret;
}

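/* Make an indirect context current: send the MakeCurrent request, install
 * the shared indirect dispatch table, and set up vertex array state the
 * first time the context is bound.
 */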
static int
indirect_bind_context(struct glx_context *gc, struct glx_context *old,
                      GLXDrawable draw, GLXDrawable read)
{
   xGLXMakeCurrentReply reply;
   GLXContextTag tag;
   __GLXattribute *state;
   Display *dpy = gc->psc->dpy;
   int opcode = __glXSetupForCommand(dpy);

   if (old != &dummyContext && !old->isDirect && old->psc->dpy == dpy) {
      tag = old->currentContextTag;
      old->currentContextTag = 0;
   } else {
      tag = 0;
   }

   SendMakeCurrentRequest(dpy, opcode, gc->xid, tag, draw, read, &reply);

   if (!IndirectAPI)
      IndirectAPI = __glXNewIndirectAPI();
   _glapi_set_dispatch(IndirectAPI);

   gc->currentContextTag = reply.contextTag;
   state = gc->client_state_private;
   if (state->array_state == NULL) {
      glGetString(GL_EXTENSIONS);
      glGetString(GL_VERSION);
      __glXInitVertexArrayState(gc);
   }

   return Success;
}

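/* Release the server-side binding of an indirect context when it is no
 * longer current.
 */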
static void
indirect_unbind_context(struct glx_context *gc, struct glx_context *new)
{
   Display *dpy = gc->psc->dpy;
   int opcode = __glXSetupForCommand(dpy);
   xGLXMakeCurrentReply reply;

   if (gc == new)
      return;

   /* We are either switching to no context, away from an indirect
    * context to a direct context, or from one dpy to another, and have
    * to send a request to the dpy to unbind the previous context.
    */
   if (!new || new->isDirect || new->psc->dpy != dpy) {
      SendMakeCurrentRequest(dpy, opcode, None,
                             gc->currentContextTag, None, None, &reply);
      gc->currentContextTag = 0;
   }
}

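/* Implement glXWaitGL: flush buffered rendering commands and ask the server
 * to finish GL execution before processing further X requests.
 */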
static void
indirect_wait_gl(struct glx_context *gc)
{
   xGLXWaitGLReq *req;
   Display *dpy = gc->currentDpy;

   /* Flush any pending commands out */
   __glXFlushRenderBuffer(gc, gc->pc);

   /* Send the glXWaitGL request */
   LockDisplay(dpy);
   GetReq(GLXWaitGL, req);
   req->reqType = gc->majorOpcode;
   req->glxCode = X_GLXWaitGL;
   req->contextTag = gc->currentContextTag;
   UnlockDisplay(dpy);
   SyncHandle();
}

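/* Implement glXWaitX: flush buffered rendering commands and ask the server
 * to finish X rendering before executing further GL commands.
 */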
static void
indirect_wait_x(struct glx_context *gc)
{
   xGLXWaitXReq *req;
   Display *dpy = gc->currentDpy;

   /* Flush any pending commands out */
   __glXFlushRenderBuffer(gc, gc->pc);

   LockDisplay(dpy);
   GetReq(GLXWaitX, req);
   req->reqType = gc->majorOpcode;
   req->glxCode = X_GLXWaitX;
   req->contextTag = gc->currentContextTag;
   UnlockDisplay(dpy);
   SyncHandle();
}

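/* Implement glXUseXFont by sending the GLXUseXFont request for the given
 * range of glyphs and display-list base.
 */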
static void
indirect_use_x_font(struct glx_context *gc,
                    Font font, int first, int count, int listBase)
{
   xGLXUseXFontReq *req;
   Display *dpy = gc->currentDpy;

   /* Flush any pending commands out */
   __glXFlushRenderBuffer(gc, gc->pc);

   /* Send the glXUseXFont request */
   LockDisplay(dpy);
   GetReq(GLXUseXFont, req);
   req->reqType = gc->majorOpcode;
   req->glxCode = X_GLXUseXFont;
   req->contextTag = gc->currentContextTag;
   req->font = font;
   req->first = first;
   req->count = count;
   req->listBase = listBase;
   UnlockDisplay(dpy);
   SyncHandle();
}

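/* GLX_EXT_texture_from_pixmap: send the BindTexImageEXT vendor request,
 * packing the drawable, buffer, and attribute list after the request header.
 */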
static void
indirect_bind_tex_image(Display * dpy,
                        GLXDrawable drawable,
                        int buffer, const int *attrib_list)
{
   xGLXVendorPrivateReq *req;
   struct glx_context *gc = __glXGetCurrentContext();
   CARD32 *drawable_ptr;
   INT32 *buffer_ptr;
   CARD32 *num_attrib_ptr;
   CARD32 *attrib_ptr;
   CARD8 opcode;
   unsigned int i;

   i = 0;
   if (attrib_list) {
      while (attrib_list[i * 2] != None)
         i++;
   }

   opcode = __glXSetupForCommand(dpy);
   if (!opcode)
      return;

   LockDisplay(dpy);
   GetReqExtra(GLXVendorPrivate, 12 + 8 * i, req);
   req->reqType = opcode;
   req->glxCode = X_GLXVendorPrivate;
   req->vendorCode = X_GLXvop_BindTexImageEXT;
   req->contextTag = gc->currentContextTag;

   drawable_ptr = (CARD32 *) (req + 1);
   buffer_ptr = (INT32 *) (drawable_ptr + 1);
   num_attrib_ptr = (CARD32 *) (buffer_ptr + 1);
   attrib_ptr = (CARD32 *) (num_attrib_ptr + 1);

   *drawable_ptr = drawable;
   *buffer_ptr = buffer;
   *num_attrib_ptr = (CARD32) i;

   i = 0;
   if (attrib_list) {
      while (attrib_list[i * 2] != None) {
         *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 0];
         *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 1];
         i++;
      }
   }

   UnlockDisplay(dpy);
   SyncHandle();
}

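/* GLX_EXT_texture_from_pixmap: send the matching ReleaseTexImageEXT vendor
 * request.
 */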
static void
indirect_release_tex_image(Display * dpy, GLXDrawable drawable, int buffer)
{
   xGLXVendorPrivateReq *req;
   struct glx_context *gc = __glXGetCurrentContext();
   CARD32 *drawable_ptr;
   INT32 *buffer_ptr;
   CARD8 opcode;

   opcode = __glXSetupForCommand(dpy);
   if (!opcode)
      return;

   LockDisplay(dpy);
   GetReqExtra(GLXVendorPrivate, sizeof(CARD32) + sizeof(INT32), req);
   req->reqType = opcode;
   req->glxCode = X_GLXVendorPrivate;
   req->vendorCode = X_GLXvop_ReleaseTexImageEXT;
   req->contextTag = gc->currentContextTag;

   drawable_ptr = (CARD32 *) (req + 1);
   buffer_ptr = (INT32 *) (drawable_ptr + 1);

   *drawable_ptr = drawable;
   *buffer_ptr = buffer;

   UnlockDisplay(dpy);
   SyncHandle();
}

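/* Context operations used by all indirect rendering contexts. */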
static const struct glx_context_vtable indirect_context_vtable = {
   indirect_destroy_context,
   indirect_bind_context,
   indirect_unbind_context,
   indirect_wait_gl,
   indirect_wait_x,
   indirect_use_x_font,
   indirect_bind_tex_image,
   indirect_release_tex_image,
   NULL, /* get_proc_address */
};

/**
 * \todo Eliminate \c __glXInitVertexArrayState.  Replace it with a new
 * function called \c __glXAllocateClientState that allocates the memory and
 * does all the initialization (including the pixel pack / unpack).
 */
_X_HIDDEN struct glx_context *
indirect_create_context(struct glx_screen *psc,
                        struct glx_config *mode,
                        struct glx_context *shareList, int renderType)
{
   struct glx_context *gc;
   int bufSize;
   CARD8 opcode;
   __GLXattribute *state;

   opcode = __glXSetupForCommand(psc->dpy);
   if (!opcode) {
      return NULL;
   }

   /* Allocate our context record */
   gc = calloc(1, sizeof *gc);
   if (!gc) {
      /* Out of memory */
      return NULL;
   }

   glx_context_init(gc, psc, mode);
   gc->isDirect = GL_FALSE;
   gc->vtable = &indirect_context_vtable;
   state = calloc(1, sizeof(struct __GLXattributeRec));
   gc->renderType = renderType;

   if (state == NULL) {
      /* Out of memory */
      free(gc);
      return NULL;
   }
   gc->client_state_private = state;
   state->NoDrawArraysProtocol = (getenv("LIBGL_NO_DRAWARRAYS") != NULL);

   /*
   ** Create a temporary buffer to hold GLX rendering commands.  The size
   ** of the buffer is selected so that the maximum number of GLX rendering
   ** commands can fit in a single X packet and still have room in the X
   ** packet for the GLXRenderReq header.
   */

   bufSize = (XMaxRequestSize(psc->dpy) * 4) - sz_xGLXRenderReq;
   gc->buf = malloc(bufSize);
   if (!gc->buf) {
      free(gc->client_state_private);
      free(gc);
      return NULL;
   }
   gc->bufSize = bufSize;

   /* Fill in the new context */
   gc->renderMode = GL_RENDER;

   state->storePack.alignment = 4;
   state->storeUnpack.alignment = 4;

   gc->attributes.stackPointer = &gc->attributes.stack[0];

   /*
   ** PERFORMANCE NOTE: A mode dependent fill image can speed things up.
   */
   gc->fillImage = __glFillImage;
   gc->pc = gc->buf;
   gc->bufEnd = gc->buf + bufSize;
   gc->isDirect = GL_FALSE;
   if (__glXDebug) {
      /*
      ** Set limit register so that there will be one command per packet
      */
      gc->limit = gc->buf;
   }
   else {
      gc->limit = gc->buf + bufSize - __GLX_BUFFER_LIMIT_SIZE;
   }
   gc->majorOpcode = opcode;

   /*
   ** Constrain the maximum drawing command size allowed to be
   ** transferred using the X_GLXRender protocol request.  First
   ** constrain by a software limit, then constrain by the protocol
   ** limit.
   */
   if (bufSize > __GLX_RENDER_CMD_SIZE_LIMIT) {
      bufSize = __GLX_RENDER_CMD_SIZE_LIMIT;
   }
   if (bufSize > __GLX_MAX_RENDER_CMD_SIZE) {
      bufSize = __GLX_MAX_RENDER_CMD_SIZE;
   }
   gc->maxSmallRenderCommandSize = bufSize;

   return gc;
}

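/* glXCreateContextAttribsARB entry point for indirect contexts.  Attribute
 * validation happens on the server; the client only extracts
 * GLX_RENDER_TYPE so the render type can be stored in the new context.
 */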
_X_HIDDEN struct glx_context *
indirect_create_context_attribs(struct glx_screen *base,
                                struct glx_config *config_base,
                                struct glx_context *shareList,
                                unsigned num_attribs,
                                const uint32_t *attribs,
                                unsigned *error)
{
   int renderType = GLX_RGBA_TYPE;
   unsigned i;

   /* The error parameter is only used on the server so that correct GLX
    * protocol errors can be generated.  On the client, it can be ignored.
    */
   (void) error;

   /* All of the attribute validation for indirect contexts is handled on the
    * server, so there's not much to do here.  Still, we need to parse the
    * attributes to correctly set renderType.
    */
   for (i = 0; i < num_attribs; i++) {
      if (attribs[i * 2] == GLX_RENDER_TYPE)
         renderType = attribs[i * 2 + 1];
   }

   return indirect_create_context(base, config_base, shareList, renderType);
}

struct glx_screen_vtable indirect_screen_vtable = {
   indirect_create_context,
   indirect_create_context_attribs
};

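/* Allocate and initialize a glx_screen whose contexts use indirect
 * rendering.
 */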
_X_HIDDEN struct glx_screen *
indirect_create_screen(int screen, struct glx_display * priv)
{
   struct glx_screen *psc;

   psc = calloc(1, sizeof *psc);
   if (psc == NULL)
      return NULL;

   glx_screen_init(psc, screen, priv);
   psc->vtable = &indirect_screen_vtable;

   return psc;
}