mesa.git: src/mesa/drivers/dri/intel/intel_context.c (commit 08069d71dd1fd18af8ad0bb4e5ebf217aec7f415)
1 /**************************************************************************
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "main/glheader.h"
30 #include "main/context.h"
31 #include "main/extensions.h"
32 #include "main/fbobject.h"
33 #include "main/framebuffer.h"
34 #include "main/imports.h"
35 #include "main/points.h"
36
37 #include "swrast/swrast.h"
38 #include "swrast_setup/swrast_setup.h"
39 #include "tnl/tnl.h"
40 #include "drivers/common/driverfuncs.h"
41 #include "drivers/common/meta.h"
42
43 #include "intel_chipset.h"
44 #include "intel_buffers.h"
45 #include "intel_tex.h"
46 #include "intel_batchbuffer.h"
47 #include "intel_clear.h"
48 #include "intel_extensions.h"
49 #include "intel_pixel.h"
50 #include "intel_regions.h"
51 #include "intel_buffer_objects.h"
52 #include "intel_fbo.h"
53 #include "intel_bufmgr.h"
54 #include "intel_screen.h"
55
56 #include "drirenderbuffer.h"
57 #include "utils.h"
58
59
60 #ifndef INTEL_DEBUG
61 int INTEL_DEBUG = (0);
62 #endif
63
64
65 #define DRIVER_DATE "20100330 DEVELOPMENT"
66 #define DRIVER_DATE_GEM "GEM " DRIVER_DATE
67
68
69 static const GLubyte *
70 intelGetString(GLcontext * ctx, GLenum name)
71 {
72 const struct intel_context *const intel = intel_context(ctx);
73 const char *chipset;
74 static char buffer[128];
75
76 switch (name) {
77 case GL_VENDOR:
78 return (GLubyte *) "Tungsten Graphics, Inc";
79 break;
80
81 case GL_RENDERER:
82 switch (intel->intelScreen->deviceID) {
83 case PCI_CHIP_845_G:
84 chipset = "Intel(R) 845G";
85 break;
86 case PCI_CHIP_I830_M:
87 chipset = "Intel(R) 830M";
88 break;
89 case PCI_CHIP_I855_GM:
90 chipset = "Intel(R) 852GM/855GM";
91 break;
92 case PCI_CHIP_I865_G:
93 chipset = "Intel(R) 865G";
94 break;
95 case PCI_CHIP_I915_G:
96 chipset = "Intel(R) 915G";
97 break;
98 case PCI_CHIP_E7221_G:
99 chipset = "Intel(R) E7221G (i915)";
100 break;
101 case PCI_CHIP_I915_GM:
102 chipset = "Intel(R) 915GM";
103 break;
104 case PCI_CHIP_I945_G:
105 chipset = "Intel(R) 945G";
106 break;
107 case PCI_CHIP_I945_GM:
108 chipset = "Intel(R) 945GM";
109 break;
110 case PCI_CHIP_I945_GME:
111 chipset = "Intel(R) 945GME";
112 break;
113 case PCI_CHIP_G33_G:
114 chipset = "Intel(R) G33";
115 break;
116 case PCI_CHIP_Q35_G:
117 chipset = "Intel(R) Q35";
118 break;
119 case PCI_CHIP_Q33_G:
120 chipset = "Intel(R) Q33";
121 break;
122 case PCI_CHIP_IGD_GM:
123 case PCI_CHIP_IGD_G:
124 chipset = "Intel(R) IGD";
125 break;
126 case PCI_CHIP_I965_Q:
127 chipset = "Intel(R) 965Q";
128 break;
129 case PCI_CHIP_I965_G:
130 case PCI_CHIP_I965_G_1:
131 chipset = "Intel(R) 965G";
132 break;
133 case PCI_CHIP_I946_GZ:
134 chipset = "Intel(R) 946GZ";
135 break;
136 case PCI_CHIP_I965_GM:
137 chipset = "Intel(R) 965GM";
138 break;
139 case PCI_CHIP_I965_GME:
140 chipset = "Intel(R) 965GME/GLE";
141 break;
142 case PCI_CHIP_GM45_GM:
143 chipset = "Mobile Intel® GM45 Express Chipset";
144 break;
145 case PCI_CHIP_IGD_E_G:
146 chipset = "Intel(R) Integrated Graphics Device";
147 break;
148 case PCI_CHIP_G45_G:
149 chipset = "Intel(R) G45/G43";
150 break;
151 case PCI_CHIP_Q45_G:
152 chipset = "Intel(R) Q45/Q43";
153 break;
154 case PCI_CHIP_G41_G:
155 chipset = "Intel(R) G41";
156 break;
157 case PCI_CHIP_B43_G:
158 chipset = "Intel(R) B43";
159 break;
160 case PCI_CHIP_ILD_G:
161 chipset = "Intel(R) Ironlake Desktop";
162 break;
163 case PCI_CHIP_ILM_G:
164 chipset = "Intel(R) Ironlake Mobile";
165 break;
166 default:
167 chipset = "Unknown Intel Chipset";
168 break;
169 }
170
171 (void) driGetRendererString(buffer, chipset, DRIVER_DATE_GEM, 0);
172 return (GLubyte *) buffer;
173
174 default:
175 return NULL;
176 }
177 }
178
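/* Flush any pending front-buffer rendering out to the real front buffer.
 * Only does anything when drawing to the window-system framebuffer
 * (Name == 0) with the front-buffer-dirty flag set, and only if the DRI2
 * loader provides flushFrontBuffer; the flag is set again in
 * intel_prepare_render() the next time we render to the front buffer.
 */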
179 static void
180 intel_flush_front(GLcontext *ctx)
181 {
182 struct intel_context *intel = intel_context(ctx);
183 __DRIcontext *driContext = intel->driContext;
184 __DRIscreen *const screen = intel->intelScreen->driScrnPriv;
185
186 if ((ctx->DrawBuffer->Name == 0) && intel->front_buffer_dirty) {
187 if (screen->dri2.loader &&
188 (screen->dri2.loader->base.version >= 2)
189 && (screen->dri2.loader->flushFrontBuffer != NULL) &&
190 driContext->driDrawablePriv &&
191 driContext->driDrawablePriv->loaderPrivate) {
192 (*screen->dri2.loader->flushFrontBuffer)(driContext->driDrawablePriv,
193 driContext->driDrawablePriv->loaderPrivate);
194
195 /* The dirty bit gets set again in intel_prepare_render() if we're
196 * still doing front-buffer rendering when we get there.
197 */
198 intel->front_buffer_dirty = GL_FALSE;
199 }
200 }
201 }
202
203 static unsigned
204 intel_bits_per_pixel(const struct intel_renderbuffer *rb)
205 {
206 return _mesa_get_format_bytes(rb->Base.Format) * 8;
207 }
208
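/* Query the DRI2 loader for the drawable's current buffers and wrap each
 * one in an intel_region attached to the matching renderbuffer.  Uses
 * getBuffersWithFormat when the loader supports it (so per-attachment bit
 * depths can be requested) and falls back to the older getBuffers entry
 * point otherwise.
 */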
209 void
210 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
211 {
212 struct gl_framebuffer *fb = drawable->driverPrivate;
213 struct intel_renderbuffer *rb;
214 struct intel_region *region, *depth_region;
215 struct intel_context *intel = context->driverPrivate;
216 struct intel_renderbuffer *front_rb, *back_rb, *depth_rb, *stencil_rb;
217 __DRIbuffer *buffers = NULL;
218 __DRIscreen *screen;
219 int i, count;
220 unsigned int attachments[10];
221 const char *region_name;
222
223 /* If we're rendering to the fake front buffer, make sure all the
224 * pending drawing has landed on the real front buffer. Otherwise
225 * when we eventually get to DRI2GetBuffersWithFormat the stale
226 * real front buffer contents will get copied to the new fake front
227 * buffer.
228 */
229 if (intel->is_front_buffer_rendering) {
230 intel_flush(&intel->ctx);
231 intel_flush_front(&intel->ctx);
232 }
233
234 /* Set this up front, so that in case our buffers get invalidated
235 * while we're getting new buffers, we don't clobber the stamp and
236 * thus ignore the invalidate. */
237 drawable->lastStamp = drawable->dri2.stamp;
238
239 if (INTEL_DEBUG & DEBUG_DRI)
240 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
241
242 screen = intel->intelScreen->driScrnPriv;
243
244 if (screen->dri2.loader
245 && (screen->dri2.loader->base.version > 2)
246 && (screen->dri2.loader->getBuffersWithFormat != NULL)) {
247
248 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
249 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
250 depth_rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
251 stencil_rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
252
253 i = 0;
254 if ((intel->is_front_buffer_rendering ||
255 intel->is_front_buffer_reading ||
256 !back_rb) && front_rb) {
257 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
258 attachments[i++] = intel_bits_per_pixel(front_rb);
259 }
260
261 if (back_rb) {
262 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
263 attachments[i++] = intel_bits_per_pixel(back_rb);
264 }
265
266 if ((depth_rb != NULL) && (stencil_rb != NULL)) {
267 attachments[i++] = __DRI_BUFFER_DEPTH_STENCIL;
268 attachments[i++] = intel_bits_per_pixel(depth_rb);
269 } else if (depth_rb != NULL) {
270 attachments[i++] = __DRI_BUFFER_DEPTH;
271 attachments[i++] = intel_bits_per_pixel(depth_rb);
272 } else if (stencil_rb != NULL) {
273 attachments[i++] = __DRI_BUFFER_STENCIL;
274 attachments[i++] = intel_bits_per_pixel(stencil_rb);
275 }
276
277 buffers =
278 (*screen->dri2.loader->getBuffersWithFormat)(drawable,
279 &drawable->w,
280 &drawable->h,
281 attachments, i / 2,
282 &count,
283 drawable->loaderPrivate);
284 } else if (screen->dri2.loader) {
285 i = 0;
286 if (intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT))
287 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
288 if (intel_get_renderbuffer(fb, BUFFER_BACK_LEFT))
289 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
290 if (intel_get_renderbuffer(fb, BUFFER_DEPTH))
291 attachments[i++] = __DRI_BUFFER_DEPTH;
292 if (intel_get_renderbuffer(fb, BUFFER_STENCIL))
293 attachments[i++] = __DRI_BUFFER_STENCIL;
294
295 buffers = (*screen->dri2.loader->getBuffers)(drawable,
296 &drawable->w,
297 &drawable->h,
298 attachments, i,
299 &count,
300 drawable->loaderPrivate);
301 }
302
303 if (buffers == NULL)
304 return;
305
306 drawable->x = 0;
307 drawable->y = 0;
308 drawable->backX = 0;
309 drawable->backY = 0;
310 drawable->numClipRects = 1;
311 drawable->pClipRects[0].x1 = 0;
312 drawable->pClipRects[0].y1 = 0;
313 drawable->pClipRects[0].x2 = drawable->w;
314 drawable->pClipRects[0].y2 = drawable->h;
315 drawable->numBackClipRects = 1;
316 drawable->pBackClipRects[0].x1 = 0;
317 drawable->pBackClipRects[0].y1 = 0;
318 drawable->pBackClipRects[0].x2 = drawable->w;
319 drawable->pBackClipRects[0].y2 = drawable->h;
320
321 depth_region = NULL;
322 for (i = 0; i < count; i++) {
323 switch (buffers[i].attachment) {
324 case __DRI_BUFFER_FRONT_LEFT:
325 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
326 region_name = "dri2 front buffer";
327 break;
328
329 case __DRI_BUFFER_FAKE_FRONT_LEFT:
330 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
331 region_name = "dri2 fake front buffer";
332 break;
333
334 case __DRI_BUFFER_BACK_LEFT:
335 rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
336 region_name = "dri2 back buffer";
337 break;
338
339 case __DRI_BUFFER_DEPTH:
340 rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
341 region_name = "dri2 depth buffer";
342 break;
343
344 case __DRI_BUFFER_DEPTH_STENCIL:
345 rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
346 region_name = "dri2 depth / stencil buffer";
347 break;
348
349 case __DRI_BUFFER_STENCIL:
350 rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
351 region_name = "dri2 stencil buffer";
352 break;
353
354 case __DRI_BUFFER_ACCUM:
355 default:
356 fprintf(stderr,
357 "unhandled buffer attach event, attachment type %d\n",
358 buffers[i].attachment);
359 return;
360 }
361
362 if (rb == NULL)
363 continue;
364
365 if (rb->region && rb->region->name == buffers[i].name)
366 continue;
367
368 if (INTEL_DEBUG & DEBUG_DRI)
369 fprintf(stderr,
370 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
371 buffers[i].name, buffers[i].attachment,
372 buffers[i].cpp, buffers[i].pitch);
373
374 if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_region) {
375 if (INTEL_DEBUG & DEBUG_DRI)
376 fprintf(stderr, "(reusing depth buffer as stencil)\n");
377 intel_region_reference(&region, depth_region);
378 }
379 else
380 region = intel_region_alloc_for_handle(intel->intelScreen,
381 buffers[i].cpp,
382 drawable->w,
383 drawable->h,
384 buffers[i].pitch / buffers[i].cpp,
385 buffers[i].name,
386 region_name);
387
388 if (buffers[i].attachment == __DRI_BUFFER_DEPTH)
389 depth_region = region;
390
391 intel_renderbuffer_set_region(intel, rb, region);
392 intel_region_release(&region);
393
394 if (buffers[i].attachment == __DRI_BUFFER_DEPTH_STENCIL) {
395 rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
396 if (rb != NULL) {
397 struct intel_region *stencil_region = NULL;
398
399 if (rb->region && rb->region->name == buffers[i].name)
400 continue;
401
402 intel_region_reference(&stencil_region, region);
403 intel_renderbuffer_set_region(intel, rb, stencil_region);
404 intel_region_release(&stencil_region);
405 }
406 }
407 }
408
409 driUpdateFramebufferSize(&intel->ctx, drawable);
410 }
411
412 /**
413 * intel_prepare_render should be called anywhere that current read/drawbuffer
414 * state is required.
415 */
416 void
417 intel_prepare_render(struct intel_context *intel)
418 {
419 __DRIcontext *driContext = intel->driContext;
420 __DRIdrawable *drawable;
421
422 drawable = driContext->driDrawablePriv;
423 if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
424 if (drawable->lastStamp != drawable->dri2.stamp)
425 intel_update_renderbuffers(driContext, drawable);
426 intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
427 driContext->dri2.draw_stamp = drawable->dri2.stamp;
428 }
429
430 drawable = driContext->driReadablePriv;
431 if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
432 if (drawable->lastStamp != drawable->dri2.stamp)
433 intel_update_renderbuffers(driContext, drawable);
434 driContext->dri2.read_stamp = drawable->dri2.stamp;
435 }
436
437 /* If we're currently rendering to the front buffer, the rendering
438 * that will happen next will probably dirty the front buffer. So
439 * mark it as dirty here.
440 */
441 if (intel->is_front_buffer_rendering)
442 intel->front_buffer_dirty = GL_TRUE;
443
444 /* Wait for the swapbuffers before the one we just emitted, so we
445 * don't get too many swaps outstanding for apps that are GPU-heavy
446 * but not CPU-heavy.
447 *
448 * We're using intelDRI2Flush (called from the loader before
449 * swapbuffer) and glFlush (for front buffer rendering) as the
450 * indicator that a frame is done and then throttle when we get
451 * here as we prepare to render the next frame. At this point the
452 * round trips for swap/copy and for getting new buffers are done, so
453 * we'll spend less time waiting on the GPU.
454 *
455 * Unfortunately, we don't have a handle to the batch containing
456 * the swap, and getting our hands on that doesn't seem worth it,
457 * so we just use the first batch we emitted after the last swap.
458 */
459 if (intel->need_throttle && intel->first_post_swapbuffers_batch) {
460 drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
461 drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
462 intel->first_post_swapbuffers_batch = NULL;
463 intel->need_throttle = GL_FALSE;
464 }
465 }
466
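/* Viewport hook, installed by intelInitContext() only when the DRI2
 * invalidate event is not available: invalidating both drawables from
 * glViewport is the fallback way to notice window size changes.
 */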
467 static void
468 intel_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
469 {
470 struct intel_context *intel = intel_context(ctx);
471 __DRIcontext *driContext = intel->driContext;
472
473 if (intel->saved_viewport)
474 intel->saved_viewport(ctx, x, y, w, h);
475
476 if (!intel->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
477 dri2InvalidateDrawable(driContext->driDrawablePriv);
478 dri2InvalidateDrawable(driContext->driReadablePriv);
479 }
480 }
481
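/* Maps INTEL_DEBUG environment variable tokens to debug flags; parsed
 * with driParseDebugString() in intelInitContext().
 */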
482 static const struct dri_debug_control debug_control[] = {
483 { "tex", DEBUG_TEXTURE},
484 { "state", DEBUG_STATE},
485 { "ioctl", DEBUG_IOCTL},
486 { "blit", DEBUG_BLIT},
487 { "mip", DEBUG_MIPTREE},
488 { "fall", DEBUG_FALLBACKS},
489 { "verb", DEBUG_VERBOSE},
490 { "bat", DEBUG_BATCH},
491 { "pix", DEBUG_PIXEL},
492 { "buf", DEBUG_BUFMGR},
493 { "reg", DEBUG_REGION},
494 { "fbo", DEBUG_FBO},
495 { "gs", DEBUG_GS},
496 { "sync", DEBUG_SYNC},
497 { "prim", DEBUG_PRIMS },
498 { "vert", DEBUG_VERTS },
499 { "dri", DEBUG_DRI },
500 { "sf", DEBUG_SF },
501 { "san", DEBUG_SANITY },
502 { "sleep", DEBUG_SLEEP },
503 { "stats", DEBUG_STATS },
504 { "tile", DEBUG_TILE },
505 { "sing", DEBUG_SINGLE_THREAD },
506 { "thre", DEBUG_SINGLE_THREAD },
507 { "wm", DEBUG_WM },
508 { "glsl_force", DEBUG_GLSL_FORCE },
509 { "urb", DEBUG_URB },
510 { "vs", DEBUG_VS },
511 { "clip", DEBUG_CLIP },
512 { NULL, 0 }
513 };
514
515
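/* UpdateState hook: pass the state change on to the software fallback
 * modules and accumulate it in NewGLState so the hardware backend can
 * emit the corresponding state later.
 */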
516 static void
517 intelInvalidateState(GLcontext * ctx, GLuint new_state)
518 {
519 struct intel_context *intel = intel_context(ctx);
520
521 _swrast_InvalidateState(ctx, new_state);
522 _swsetup_InvalidateState(ctx, new_state);
523 _vbo_InvalidateState(ctx, new_state);
524 _tnl_InvalidateState(ctx, new_state);
525 _tnl_invalidate_vertex_state(ctx, new_state);
526
527 intel->NewGLState |= new_state;
528
529 if (intel->vtbl.invalidate_state)
530 intel->vtbl.invalidate_state( intel, new_state );
531 }
532
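/* Flush queued vertices and any non-empty batchbuffer, without touching
 * the front buffer or arming the swap throttle (see intel_glFlush for the
 * glFlush entry point).
 */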
533 void
534 intel_flush(GLcontext *ctx)
535 {
536 struct intel_context *intel = intel_context(ctx);
537
538 if (intel->Fallback)
539 _swrast_flush(ctx);
540
541 if (intel->gen < 4)
542 INTEL_FIREVERTICES(intel);
543
544 if (intel->batch->map != intel->batch->ptr)
545 intel_batchbuffer_flush(intel->batch);
546 }
547
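/* glFlush entry point: also pushes front-buffer rendering out and arms
 * the throttle checked in intel_prepare_render().
 */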
548 static void
549 intel_glFlush(GLcontext *ctx)
550 {
551 struct intel_context *intel = intel_context(ctx);
552
553 intel_flush(ctx);
554 intel_flush_front(ctx);
555 intel->need_throttle = GL_TRUE;
556 }
557
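/* glFinish entry point: flush everything, then wait for rendering to
 * complete on each bound color buffer's BO.
 */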
558 void
559 intelFinish(GLcontext * ctx)
560 {
561 struct gl_framebuffer *fb = ctx->DrawBuffer;
562 int i;
563
564 intel_flush(ctx);
565 intel_flush_front(ctx);
566
567 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
568 struct intel_renderbuffer *irb;
569
570 irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);
571
572 if (irb && irb->region)
573 drm_intel_bo_wait_rendering(irb->region->buffer);
574 }
575 if (fb->_DepthBuffer) {
576 /* XXX: Wait on buffer idle */
577 }
578 }
579
580 void
581 intelInitDriverFunctions(struct dd_function_table *functions)
582 {
583 _mesa_init_driver_functions(functions);
584
585 functions->Flush = intel_glFlush;
586 functions->Finish = intelFinish;
587 functions->GetString = intelGetString;
588 functions->UpdateState = intelInvalidateState;
589
590 intelInitTextureFuncs(functions);
591 intelInitTextureImageFuncs(functions);
592 intelInitTextureSubImageFuncs(functions);
593 intelInitTextureCopyImageFuncs(functions);
594 intelInitStateFuncs(functions);
595 intelInitClearFuncs(functions);
596 intelInitBufferFuncs(functions);
597 intelInitPixelFuncs(functions);
598 intelInitBufferObjectFuncs(functions);
599 intel_init_syncobj_functions(functions);
600 }
601
602
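/* Common context-creation path shared by the i915 and i965 drivers:
 * creates the Mesa context, derives the hardware generation from the PCI
 * device ID, applies driconf options, and sets up the software fallback
 * and meta modules.
 */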
603 GLboolean
604 intelInitContext(struct intel_context *intel,
605 int api,
606 const __GLcontextModes * mesaVis,
607 __DRIcontext * driContextPriv,
608 void *sharedContextPrivate,
609 struct dd_function_table *functions)
610 {
611 GLcontext *ctx = &intel->ctx;
612 GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
613 __DRIscreen *sPriv = driContextPriv->driScreenPriv;
614 struct intel_screen *intelScreen = sPriv->private;
615 int bo_reuse_mode;
616 __GLcontextModes visual;
617
618 /* we can't do anything without a connection to the device */
619 if (intelScreen->bufmgr == NULL)
620 return GL_FALSE;
621
622 /* Can't rely on invalidate events, fall back to glViewport hack */
623 if (!driContextPriv->driScreenPriv->dri2.useInvalidate) {
624 intel->saved_viewport = functions->Viewport;
625 functions->Viewport = intel_viewport;
626 }
627
628 if (mesaVis == NULL) {
629 memset(&visual, 0, sizeof visual);
630 mesaVis = &visual;
631 }
632
633 if (!_mesa_initialize_context_for_api(&intel->ctx, api, mesaVis, shareCtx,
634 functions, (void *) intel)) {
635 printf("%s: failed to init mesa context\n", __FUNCTION__);
636 return GL_FALSE;
637 }
638
639 driContextPriv->driverPrivate = intel;
640 intel->intelScreen = intelScreen;
641 intel->driContext = driContextPriv;
642 intel->driFd = sPriv->fd;
643
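/* Derive the hardware generation and per-generation feature flags from
 * the PCI device ID.
 */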
644 intel->has_xrgb_textures = GL_TRUE;
645 if (IS_GEN6(intel->intelScreen->deviceID)) {
646 intel->gen = 6;
647 intel->needs_ff_sync = GL_TRUE;
648 intel->has_luminance_srgb = GL_TRUE;
649 } else if (IS_GEN5(intel->intelScreen->deviceID)) {
650 intel->gen = 5;
651 intel->needs_ff_sync = GL_TRUE;
652 intel->has_luminance_srgb = GL_TRUE;
653 } else if (IS_965(intel->intelScreen->deviceID)) {
654 intel->gen = 4;
655 if (IS_G4X(intel->intelScreen->deviceID)) {
656 intel->has_luminance_srgb = GL_TRUE;
657 intel->is_g4x = GL_TRUE;
658 }
659 } else if (IS_9XX(intel->intelScreen->deviceID)) {
660 intel->gen = 3;
661 if (IS_945(intel->intelScreen->deviceID)) {
662 intel->is_945 = GL_TRUE;
663 }
664 } else {
665 intel->gen = 2;
666 if (intel->intelScreen->deviceID == PCI_CHIP_I830_M ||
667 intel->intelScreen->deviceID == PCI_CHIP_845_G) {
668 intel->has_xrgb_textures = GL_FALSE;
669 }
670 }
671
672 driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
673 sPriv->myNum, (intel->gen >= 4) ? "i965" : "i915");
674 if (intelScreen->deviceID == PCI_CHIP_I865_G)
675 intel->maxBatchSize = 4096;
676 else
677 intel->maxBatchSize = BATCH_SZ;
678
679 intel->bufmgr = intelScreen->bufmgr;
680
681 bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
682 switch (bo_reuse_mode) {
683 case DRI_CONF_BO_REUSE_DISABLED:
684 break;
685 case DRI_CONF_BO_REUSE_ALL:
686 intel_bufmgr_gem_enable_reuse(intel->bufmgr);
687 break;
688 }
689
690 /* This doesn't yet catch all non-conformant rendering, but it's a
691 * start.
692 */
693 if (getenv("INTEL_STRICT_CONFORMANCE")) {
694 unsigned int value = atoi(getenv("INTEL_STRICT_CONFORMANCE"));
695 if (value > 0) {
696 intel->conformance_mode = value;
697 }
698 else {
699 intel->conformance_mode = 1;
700 }
701 }
702
703 if (intel->conformance_mode > 0) {
704 ctx->Const.MinLineWidth = 1.0;
705 ctx->Const.MinLineWidthAA = 1.0;
706 ctx->Const.MaxLineWidth = 1.0;
707 ctx->Const.MaxLineWidthAA = 1.0;
708 ctx->Const.LineWidthGranularity = 1.0;
709 }
710 else {
711 ctx->Const.MinLineWidth = 1.0;
712 ctx->Const.MinLineWidthAA = 1.0;
713 ctx->Const.MaxLineWidth = 5.0;
714 ctx->Const.MaxLineWidthAA = 5.0;
715 ctx->Const.LineWidthGranularity = 0.5;
716 }
717
718 ctx->Const.MinPointSize = 1.0;
719 ctx->Const.MinPointSizeAA = 1.0;
720 ctx->Const.MaxPointSize = 255.0;
721 ctx->Const.MaxPointSizeAA = 3.0;
722 ctx->Const.PointSizeGranularity = 1.0;
723
724 ctx->Const.MaxSamples = 1.0;
725
726 /* Reinitialize the context point state.
727 * It depends on constants in __GLcontextRec::Const
728 */
729 _mesa_init_point(ctx);
730
731 meta_init_metaops(ctx, &intel->meta);
732 if (intel->gen >= 4) {
733 if (MAX_WIDTH > 8192)
734 ctx->Const.MaxRenderbufferSize = 8192;
735 } else {
736 if (MAX_WIDTH > 2048)
737 ctx->Const.MaxRenderbufferSize = 2048;
738 }
739
740 /* Initialize the software rasterizer and helper modules. */
741 _swrast_CreateContext(ctx);
742 _vbo_CreateContext(ctx);
743 _tnl_CreateContext(ctx);
744 _swsetup_CreateContext(ctx);
745
746 /* Configure swrast to match hardware characteristics: */
747 _swrast_allow_pixel_fog(ctx, GL_FALSE);
748 _swrast_allow_vertex_fog(ctx, GL_TRUE);
749
750 _mesa_meta_init(ctx);
751
752 intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
753 intel->hw_stipple = 1;
754
755 /* XXX FBO: this doesn't seem to be used anywhere */
756 switch (mesaVis->depthBits) {
757 case 0: /* what to do in this case? */
758 case 16:
759 intel->polygon_offset_scale = 1.0;
760 break;
761 case 24:
762 intel->polygon_offset_scale = 2.0; /* req'd to pass glean */
763 break;
764 default:
765 assert(0);
766 break;
767 }
768
769 if (intel->gen >= 4)
770 intel->polygon_offset_scale /= 0xffff;
771
772 intel->RenderIndex = ~0;
773
774 switch (ctx->API) {
775 case API_OPENGL:
776 intelInitExtensions(ctx);
777 break;
778 case API_OPENGLES:
779 break;
780 case API_OPENGLES2:
781 intelInitExtensionsES2(ctx);
782 break;
783 }
784
785 INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
786 if (INTEL_DEBUG & DEBUG_BUFMGR)
787 dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);
788
789 intel->batch = intel_batchbuffer_alloc(intel);
790
791 intel_fbo_init(intel);
792
793 if (intel->ctx.Mesa_DXTn) {
794 _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
795 _mesa_enable_extension(ctx, "GL_S3_s3tc");
796 }
797 else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
798 _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
799 }
800 intel->use_texture_tiling = driQueryOptionb(&intel->optionCache,
801 "texture_tiling");
802 intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");
803
804 intel->prim.primitive = ~0;
805
806 /* Force all software fallbacks */
807 if (driQueryOptionb(&intel->optionCache, "no_rast")) {
808 fprintf(stderr, "disabling 3D rasterization\n");
809 intel->no_rast = 1;
810 }
811
812 if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
813 fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
814 intel->always_flush_batch = 1;
815 }
816
817 if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
818 fprintf(stderr, "flushing GPU caches before/after each draw call\n");
819 intel->always_flush_cache = 1;
820 }
821
822 /* Disable all hardware rendering (skip emitting batches and fences/waits
823 * to the kernel)
824 */
825 intel->no_hw = getenv("INTEL_NO_HW") != NULL;
826
827 return GL_TRUE;
828 }
829
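/* Tear the context down in roughly the reverse order of intelInitContext():
 * meta and fallback modules first, then the batchbuffer and BOs, and
 * finally the core Mesa context data.
 */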
830 void
831 intelDestroyContext(__DRIcontext * driContextPriv)
832 {
833 struct intel_context *intel =
834 (struct intel_context *) driContextPriv->driverPrivate;
835
836 assert(intel); /* should never be null */
837 if (intel) {
838 INTEL_FIREVERTICES(intel);
839
840 _mesa_meta_free(&intel->ctx);
841
842 meta_destroy_metaops(&intel->meta);
843
844 intel->vtbl.destroy(intel);
845
846 _swsetup_DestroyContext(&intel->ctx);
847 _tnl_DestroyContext(&intel->ctx);
848 _vbo_DestroyContext(&intel->ctx);
849
850 _swrast_DestroyContext(&intel->ctx);
851 intel->Fallback = 0x0; /* don't call _swrast_Flush later */
852
853 intel_batchbuffer_free(intel->batch);
854 intel->batch = NULL;
855
856 free(intel->prim.vb);
857 intel->prim.vb = NULL;
858 drm_intel_bo_unreference(intel->prim.vb_bo);
859 intel->prim.vb_bo = NULL;
860 drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
861 intel->first_post_swapbuffers_batch = NULL;
862
863 driDestroyOptionCache(&intel->optionCache);
864
865 /* free the Mesa context */
866 _mesa_free_context_data(&intel->ctx);
867
868 FREE(intel);
869 driContextPriv->driverPrivate = NULL;
870 }
871 }
872
873 GLboolean
874 intelUnbindContext(__DRIcontext * driContextPriv)
875 {
876 /* Unset current context and dispatch table */
877 _mesa_make_current(NULL, NULL, NULL);
878
879 return GL_TRUE;
880 }
881
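/* MakeCurrent: flush the previously current context if we're switching
 * away from it, refresh the renderbuffers via intel_prepare_render(), and
 * bind the new draw/read framebuffers.
 */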
882 GLboolean
883 intelMakeCurrent(__DRIcontext * driContextPriv,
884 __DRIdrawable * driDrawPriv,
885 __DRIdrawable * driReadPriv)
886 {
887 struct intel_context *intel;
888 GET_CURRENT_CONTEXT(curCtx);
889
890 if (driContextPriv)
891 intel = (struct intel_context *) driContextPriv->driverPrivate;
892 else
893 intel = NULL;
894
895 /* According to the glXMakeCurrent() man page: "Pending commands to
896 * the previous context, if any, are flushed before it is released."
897 * But only flush if we're actually changing contexts.
898 */
899 if (intel_context(curCtx) && intel_context(curCtx) != intel) {
900 _mesa_flush(curCtx);
901 }
902
903 if (driContextPriv) {
904 struct gl_framebuffer *fb, *readFb;
905
906 if (driDrawPriv == NULL && driReadPriv == NULL) {
907 fb = _mesa_get_incomplete_framebuffer();
908 readFb = _mesa_get_incomplete_framebuffer();
909 } else {
910 fb = driDrawPriv->driverPrivate;
911 readFb = driReadPriv->driverPrivate;
912 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
913 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
914 }
915
916 intel_prepare_render(intel);
917 _mesa_make_current(&intel->ctx, fb, readFb);
918
919 /* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer
920 * is NULL at that point. We can't call _mesa_make_current()
921 * first, since we need the buffer size for the initial
922 * viewport. So just call intel_draw_buffer() again here. */
923 intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
924 }
925 else {
926 _mesa_make_current(NULL, NULL, NULL);
927 }
928
929 return GL_TRUE;
930 }