Merge branch 'master' into pipe-video
[mesa.git] / src / glx / indirect_glx.c
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Soft-
6 * ware"), to deal in the Software without restriction, including without
7 * limitation the rights to use, copy, modify, merge, publish, distribute,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, provided that the above copyright
10 * notice(s) and this permission notice appear in all copies of the Soft-
11 * ware and that both the above copyright notice(s) and this permission
12 * notice appear in supporting documentation.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 * ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY
17 * RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN
18 * THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSE-
19 * QUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
20 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
21 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFOR-
22 * MANCE OF THIS SOFTWARE.
23 *
24 * Except as contained in this notice, the name of a copyright holder shall
25 * not be used in advertising or otherwise to promote the sale, use or
26 * other dealings in this Software without prior written authorization of
27 * the copyright holder.
28 *
29 * Authors:
30 * Kristian Høgsberg (krh@bitplanet.net)
31 */
32
33 #include "glapi.h"
34 #include "glxclient.h"
35
36 extern struct _glapi_table *__glXNewIndirectAPI(void);
37
38 /*
39 ** All indirect rendering contexts will share the same indirect dispatch table.
40 */
41 static struct _glapi_table *IndirectAPI = NULL;
42
43 static void
44 indirect_destroy_context(struct glx_context *gc)
45 {
46 if (!gc->imported && gc->xid)
47 glx_send_destroy_context(gc->psc->dpy, gc->xid);
48
49 __glXFreeVertexArrayState(gc);
50
51 if (gc->vendor)
52 XFree((char *) gc->vendor);
53 if (gc->renderer)
54 XFree((char *) gc->renderer);
55 if (gc->version)
56 XFree((char *) gc->version);
57 if (gc->extensions)
58 XFree((char *) gc->extensions);
59 __glFreeAttributeState(gc);
60 XFree((char *) gc->buf);
61 Xfree((char *) gc->client_state_private);
62 XFree((char *) gc);
63 }
64
/*
 * Issue the protocol request that binds (draw, read, gc_id) on the server
 * and collect the server's reply (which carries the new context tag).
 *
 * Three wire encodings are used depending on what the server supports:
 *   - draw == read: the plain GLX 1.2 MakeCurrent request;
 *   - server speaks GLX >= 1.3: MakeContextCurrent;
 *   - otherwise: the SGI make_current_read vendor-private request.
 *
 * Returns the status of _XReply(); *reply is only meaningful on success.
 */
static Bool
SendMakeCurrentRequest(Display * dpy, CARD8 opcode,
                       GLXContextID gc_id, GLXContextTag gc_tag,
                       GLXDrawable draw, GLXDrawable read,
                       xGLXMakeCurrentReply * reply)
{
   Bool ret;

   LockDisplay(dpy);

   if (draw == read) {
      /* Same drawable for drawing and reading: the original GLX 1.2
       * MakeCurrent request is sufficient. */
      xGLXMakeCurrentReq *req;

      GetReq(GLXMakeCurrent, req);
      req->reqType = opcode;
      req->glxCode = X_GLXMakeCurrent;
      req->drawable = draw;
      req->context = gc_id;
      req->oldContextTag = gc_tag;
   }
   else {
      struct glx_display *priv = __glXInitialize(dpy);
      /* NOTE(review): priv is dereferenced below without a NULL check;
       * presumably __glXInitialize cannot fail once a context exists on
       * this display — confirm before relying on it. */

      /* If the server can support the GLX 1.3 version, we should
       * prefer that.  Not only that, some servers support GLX 1.3 but
       * not the SGI extension.
       */

      if ((priv->majorVersion > 1) || (priv->minorVersion >= 3)) {
         xGLXMakeContextCurrentReq *req;

         GetReq(GLXMakeContextCurrent, req);
         req->reqType = opcode;
         req->glxCode = X_GLXMakeContextCurrent;
         req->drawable = draw;
         req->readdrawable = read;
         req->context = gc_id;
         req->oldContextTag = gc_tag;
      }
      else {
         /* Pre-1.3 server: fall back to the SGI make_current_read
          * extension, carried inside a VendorPrivateWithReply request.
          * The extra bytes reserved cover the SGI request body beyond
          * the generic vendor-private header. */
         xGLXVendorPrivateWithReplyReq *vpreq;
         xGLXMakeCurrentReadSGIReq *req;

         GetReqExtra(GLXVendorPrivateWithReply,
                     sz_xGLXMakeCurrentReadSGIReq -
                     sz_xGLXVendorPrivateWithReplyReq, vpreq);
         req = (xGLXMakeCurrentReadSGIReq *) vpreq;
         req->reqType = opcode;
         req->glxCode = X_GLXVendorPrivateWithReply;
         req->vendorCode = X_GLXvop_MakeCurrentReadSGI;
         req->drawable = draw;
         req->readable = read;
         req->context = gc_id;
         req->oldContextTag = gc_tag;
      }
   }

   /* All three request forms produce an xGLXMakeCurrentReply. */
   ret = _XReply(dpy, (xReply *) reply, 0, False);

   UnlockDisplay(dpy);
   SyncHandle();

   return ret;
}
129
130 static int
131 indirect_bind_context(struct glx_context *gc, struct glx_context *old,
132 GLXDrawable draw, GLXDrawable read)
133 {
134 xGLXMakeCurrentReply reply;
135 GLXContextTag tag;
136 __GLXattribute *state;
137 Display *dpy = gc->psc->dpy;
138 int opcode = __glXSetupForCommand(dpy);
139
140 if (old != &dummyContext && !old->isDirect && old->psc->dpy == dpy) {
141 tag = old->currentContextTag;
142 old->currentContextTag = 0;
143 } else {
144 tag = 0;
145 }
146
147 SendMakeCurrentRequest(dpy, opcode, gc->xid, tag, draw, read, &reply);
148
149 if (!IndirectAPI)
150 IndirectAPI = __glXNewIndirectAPI();
151 _glapi_set_dispatch(IndirectAPI);
152
153 gc->currentContextTag = reply.contextTag;
154 state = gc->client_state_private;
155 if (state->array_state == NULL) {
156 glGetString(GL_EXTENSIONS);
157 glGetString(GL_VERSION);
158 __glXInitVertexArrayState(gc);
159 }
160
161 return Success;
162 }
163
164 static void
165 indirect_unbind_context(struct glx_context *gc, struct glx_context *new)
166 {
167 Display *dpy = gc->psc->dpy;
168 int opcode = __glXSetupForCommand(dpy);
169 xGLXMakeCurrentReply reply;
170
171 if (gc == new)
172 return;
173
174 /* We are either switching to no context, away from a indirect
175 * context to a direct context or from one dpy to another and have
176 * to send a request to the dpy to unbind the previous context.
177 */
178 if (!new || new->isDirect || new->psc->dpy != dpy) {
179 SendMakeCurrentRequest(dpy, opcode, None,
180 gc->currentContextTag, None, None, &reply);
181 gc->currentContextTag = 0;
182 }
183 }
184
185 static void
186 indirect_wait_gl(struct glx_context *gc)
187 {
188 xGLXWaitGLReq *req;
189 Display *dpy = gc->currentDpy;
190
191 /* Flush any pending commands out */
192 __glXFlushRenderBuffer(gc, gc->pc);
193
194 /* Send the glXWaitGL request */
195 LockDisplay(dpy);
196 GetReq(GLXWaitGL, req);
197 req->reqType = gc->majorOpcode;
198 req->glxCode = X_GLXWaitGL;
199 req->contextTag = gc->currentContextTag;
200 UnlockDisplay(dpy);
201 SyncHandle();
202 }
203
204 static void
205 indirect_wait_x(struct glx_context *gc)
206 {
207 xGLXWaitXReq *req;
208 Display *dpy = gc->currentDpy;
209
210 /* Flush any pending commands out */
211 __glXFlushRenderBuffer(gc, gc->pc);
212
213 LockDisplay(dpy);
214 GetReq(GLXWaitX, req);
215 req->reqType = gc->majorOpcode;
216 req->glxCode = X_GLXWaitX;
217 req->contextTag = gc->currentContextTag;
218 UnlockDisplay(dpy);
219 SyncHandle();
220 }
221
222 static void
223 indirect_use_x_font(struct glx_context *gc,
224 Font font, int first, int count, int listBase)
225 {
226 xGLXUseXFontReq *req;
227 Display *dpy = gc->currentDpy;
228
229 /* Flush any pending commands out */
230 __glXFlushRenderBuffer(gc, gc->pc);
231
232 /* Send the glXUseFont request */
233 LockDisplay(dpy);
234 GetReq(GLXUseXFont, req);
235 req->reqType = gc->majorOpcode;
236 req->glxCode = X_GLXUseXFont;
237 req->contextTag = gc->currentContextTag;
238 req->font = font;
239 req->first = first;
240 req->count = count;
241 req->listBase = listBase;
242 UnlockDisplay(dpy);
243 SyncHandle();
244 }
245
246 static void
247 indirect_bind_tex_image(Display * dpy,
248 GLXDrawable drawable,
249 int buffer, const int *attrib_list)
250 {
251 xGLXVendorPrivateReq *req;
252 struct glx_context *gc = __glXGetCurrentContext();
253 CARD32 *drawable_ptr;
254 INT32 *buffer_ptr;
255 CARD32 *num_attrib_ptr;
256 CARD32 *attrib_ptr;
257 CARD8 opcode;
258 unsigned int i;
259
260 i = 0;
261 if (attrib_list) {
262 while (attrib_list[i * 2] != None)
263 i++;
264 }
265
266 opcode = __glXSetupForCommand(dpy);
267 if (!opcode)
268 return;
269
270 LockDisplay(dpy);
271 GetReqExtra(GLXVendorPrivate, 12 + 8 * i, req);
272 req->reqType = opcode;
273 req->glxCode = X_GLXVendorPrivate;
274 req->vendorCode = X_GLXvop_BindTexImageEXT;
275 req->contextTag = gc->currentContextTag;
276
277 drawable_ptr = (CARD32 *) (req + 1);
278 buffer_ptr = (INT32 *) (drawable_ptr + 1);
279 num_attrib_ptr = (CARD32 *) (buffer_ptr + 1);
280 attrib_ptr = (CARD32 *) (num_attrib_ptr + 1);
281
282 *drawable_ptr = drawable;
283 *buffer_ptr = buffer;
284 *num_attrib_ptr = (CARD32) i;
285
286 i = 0;
287 if (attrib_list) {
288 while (attrib_list[i * 2] != None) {
289 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 0];
290 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 1];
291 i++;
292 }
293 }
294
295 UnlockDisplay(dpy);
296 SyncHandle();
297 }
298
299 static void
300 indirect_release_tex_image(Display * dpy, GLXDrawable drawable, int buffer)
301 {
302 xGLXVendorPrivateReq *req;
303 struct glx_context *gc = __glXGetCurrentContext();
304 CARD32 *drawable_ptr;
305 INT32 *buffer_ptr;
306 CARD8 opcode;
307
308 opcode = __glXSetupForCommand(dpy);
309 if (!opcode)
310 return;
311
312 LockDisplay(dpy);
313 GetReqExtra(GLXVendorPrivate, sizeof(CARD32) + sizeof(INT32), req);
314 req->reqType = opcode;
315 req->glxCode = X_GLXVendorPrivate;
316 req->vendorCode = X_GLXvop_ReleaseTexImageEXT;
317 req->contextTag = gc->currentContextTag;
318
319 drawable_ptr = (CARD32 *) (req + 1);
320 buffer_ptr = (INT32 *) (drawable_ptr + 1);
321
322 *drawable_ptr = drawable;
323 *buffer_ptr = buffer;
324
325 UnlockDisplay(dpy);
326 SyncHandle();
327 }
328
/* Method table wiring the generic glx_context operations to the indirect
 * (protocol-based) implementations above.  Positional initializer: the
 * entry order must match struct glx_context_vtable as declared in
 * glxclient.h. */
static const struct glx_context_vtable indirect_context_vtable = {
   indirect_destroy_context,
   indirect_bind_context,
   indirect_unbind_context,
   indirect_wait_gl,
   indirect_wait_x,
   indirect_use_x_font,
   indirect_bind_tex_image,
   indirect_release_tex_image,
};
339
340 /**
341 * \todo Eliminate \c __glXInitVertexArrayState. Replace it with a new
342 * function called \c __glXAllocateClientState that allocates the memory and
343 * does all the initialization (including the pixel pack / unpack).
344 */
345 _X_HIDDEN struct glx_context *
346 indirect_create_context(struct glx_screen *psc,
347 struct glx_config *mode,
348 struct glx_context *shareList, int renderType)
349 {
350 struct glx_context *gc;
351 int bufSize;
352 CARD8 opcode;
353 __GLXattribute *state;
354
355 opcode = __glXSetupForCommand(psc->dpy);
356 if (!opcode) {
357 return NULL;
358 }
359
360 /* Allocate our context record */
361 gc = Xmalloc(sizeof *gc);
362 if (!gc) {
363 /* Out of memory */
364 return NULL;
365 }
366 memset(gc, 0, sizeof *gc);
367
368 glx_context_init(gc, psc, mode);
369 gc->isDirect = GL_FALSE;
370 gc->vtable = &indirect_context_vtable;
371 state = Xmalloc(sizeof(struct __GLXattributeRec));
372 if (state == NULL) {
373 /* Out of memory */
374 Xfree(gc);
375 return NULL;
376 }
377 gc->client_state_private = state;
378 memset(gc->client_state_private, 0, sizeof(struct __GLXattributeRec));
379 state->NoDrawArraysProtocol = (getenv("LIBGL_NO_DRAWARRAYS") != NULL);
380
381 /*
382 ** Create a temporary buffer to hold GLX rendering commands. The size
383 ** of the buffer is selected so that the maximum number of GLX rendering
384 ** commands can fit in a single X packet and still have room in the X
385 ** packet for the GLXRenderReq header.
386 */
387
388 bufSize = (XMaxRequestSize(psc->dpy) * 4) - sz_xGLXRenderReq;
389 gc->buf = (GLubyte *) Xmalloc(bufSize);
390 if (!gc->buf) {
391 Xfree(gc->client_state_private);
392 Xfree(gc);
393 return NULL;
394 }
395 gc->bufSize = bufSize;
396
397 /* Fill in the new context */
398 gc->renderMode = GL_RENDER;
399
400 state->storePack.alignment = 4;
401 state->storeUnpack.alignment = 4;
402
403 gc->attributes.stackPointer = &gc->attributes.stack[0];
404
405 /*
406 ** PERFORMANCE NOTE: A mode dependent fill image can speed things up.
407 ** Other code uses the fastImageUnpack bit, but it is never set
408 ** to GL_TRUE.
409 */
410 gc->fastImageUnpack = GL_FALSE;
411 gc->fillImage = __glFillImage;
412 gc->pc = gc->buf;
413 gc->bufEnd = gc->buf + bufSize;
414 gc->isDirect = GL_FALSE;
415 if (__glXDebug) {
416 /*
417 ** Set limit register so that there will be one command per packet
418 */
419 gc->limit = gc->buf;
420 }
421 else {
422 gc->limit = gc->buf + bufSize - __GLX_BUFFER_LIMIT_SIZE;
423 }
424 gc->majorOpcode = opcode;
425
426 /*
427 ** Constrain the maximum drawing command size allowed to be
428 ** transfered using the X_GLXRender protocol request. First
429 ** constrain by a software limit, then constrain by the protocl
430 ** limit.
431 */
432 if (bufSize > __GLX_RENDER_CMD_SIZE_LIMIT) {
433 bufSize = __GLX_RENDER_CMD_SIZE_LIMIT;
434 }
435 if (bufSize > __GLX_MAX_RENDER_CMD_SIZE) {
436 bufSize = __GLX_MAX_RENDER_CMD_SIZE;
437 }
438 gc->maxSmallRenderCommandSize = bufSize;
439
440
441 return gc;
442 }
443
/* Screen-level method table for indirect screens; currently only the
 * context-creation hook.  Positional initializer: order must match
 * struct glx_screen_vtable in glxclient.h. */
struct glx_screen_vtable indirect_screen_vtable = {
   indirect_create_context
};
447
448 _X_HIDDEN struct glx_screen *
449 indirect_create_screen(int screen, struct glx_display * priv)
450 {
451 struct glx_screen *psc;
452
453 psc = Xmalloc(sizeof *psc);
454 if (psc == NULL)
455 return NULL;
456
457 memset(psc, 0, sizeof *psc);
458 glx_screen_init(psc, screen, priv);
459 psc->vtable = &indirect_screen_vtable;
460
461 return psc;
462 }