Merge branch 'mesa_7_5_branch' into dlist-statechange-shortcircuit
[mesa.git] / src/mesa/drivers/dri/intel/intel_context.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/glheader.h"
#include "main/context.h"
#include "main/arrayobj.h"
#include "main/extensions.h"
#include "main/framebuffer.h"
#include "main/imports.h"
#include "main/points.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "drivers/common/driverfuncs.h"

#include "i830_dri.h"

#include "intel_chipset.h"
#include "intel_buffers.h"
#include "intel_tex.h"
#include "intel_batchbuffer.h"
#include "intel_clear.h"
#include "intel_extensions.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "intel_decode.h"
#include "intel_bufmgr.h"
#include "intel_screen.h"
#include "intel_swapbuffers.h"

#include "drirenderbuffer.h"
#include "vblank.h"
#include "utils.h"
#include "xmlpool.h"            /* for symbolic values of enum-type options */


#ifndef INTEL_DEBUG
int INTEL_DEBUG = (0);
#endif


#define DRIVER_DATE "20090114"
#define DRIVER_DATE_GEM "GEM " DRIVER_DATE


static void intel_flush(GLcontext *ctx, GLboolean needs_mi_flush);

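/**
 * Return driver-specific strings for glGetString(), picking the renderer
 * name from the PCI device ID of the current screen.
 */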
static const GLubyte *
intelGetString(GLcontext * ctx, GLenum name)
{
   const struct intel_context *const intel = intel_context(ctx);
   const char *chipset;
   static char buffer[128];

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) "Tungsten Graphics, Inc";
      break;

   case GL_RENDERER:
      switch (intel->intelScreen->deviceID) {
      case PCI_CHIP_845_G:
         chipset = "Intel(R) 845G";
         break;
      case PCI_CHIP_I830_M:
         chipset = "Intel(R) 830M";
         break;
      case PCI_CHIP_I855_GM:
         chipset = "Intel(R) 852GM/855GM";
         break;
      case PCI_CHIP_I865_G:
         chipset = "Intel(R) 865G";
         break;
      case PCI_CHIP_I915_G:
         chipset = "Intel(R) 915G";
         break;
      case PCI_CHIP_E7221_G:
         chipset = "Intel (R) E7221G (i915)";
         break;
      case PCI_CHIP_I915_GM:
         chipset = "Intel(R) 915GM";
         break;
      case PCI_CHIP_I945_G:
         chipset = "Intel(R) 945G";
         break;
      case PCI_CHIP_I945_GM:
         chipset = "Intel(R) 945GM";
         break;
      case PCI_CHIP_I945_GME:
         chipset = "Intel(R) 945GME";
         break;
      case PCI_CHIP_G33_G:
         chipset = "Intel(R) G33";
         break;
      case PCI_CHIP_Q35_G:
         chipset = "Intel(R) Q35";
         break;
      case PCI_CHIP_Q33_G:
         chipset = "Intel(R) Q33";
         break;
      case PCI_CHIP_IGD_GM:
      case PCI_CHIP_IGD_G:
         chipset = "Intel(R) IGD";
         break;
      case PCI_CHIP_I965_Q:
         chipset = "Intel(R) 965Q";
         break;
      case PCI_CHIP_I965_G:
      case PCI_CHIP_I965_G_1:
         chipset = "Intel(R) 965G";
         break;
      case PCI_CHIP_I946_GZ:
         chipset = "Intel(R) 946GZ";
         break;
      case PCI_CHIP_I965_GM:
         chipset = "Intel(R) 965GM";
         break;
      case PCI_CHIP_I965_GME:
         chipset = "Intel(R) 965GME/GLE";
         break;
      case PCI_CHIP_GM45_GM:
         chipset = "Mobile Intel® GM45 Express Chipset";
         break;
      case PCI_CHIP_IGD_E_G:
         chipset = "Intel(R) Integrated Graphics Device";
         break;
      case PCI_CHIP_G45_G:
         chipset = "Intel(R) G45/G43";
         break;
      case PCI_CHIP_Q45_G:
         chipset = "Intel(R) Q45/Q43";
         break;
      case PCI_CHIP_G41_G:
         chipset = "Intel(R) G41";
         break;
      default:
         chipset = "Unknown Intel Chipset";
         break;
      }

      (void) driGetRendererString(buffer, chipset,
                                  (intel->ttm) ? DRIVER_DATE_GEM : DRIVER_DATE,
                                  0);
      return (GLubyte *) buffer;

   default:
      return NULL;
   }
}

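/**
 * Map a renderbuffer's internal format to its bits-per-pixel, as needed
 * for the DRI2 getBuffersWithFormat request.  Returns 0 for unexpected
 * formats.
 */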
static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   switch (rb->Base._ActualFormat) {
   case GL_RGB5:
   case GL_DEPTH_COMPONENT16:
      return 16;
   case GL_RGB8:
   case GL_RGBA8:
   case GL_DEPTH_COMPONENT24:
   case GL_DEPTH24_STENCIL8_EXT:
   case GL_STENCIL_INDEX8_EXT:
      return 32;
   default:
      return 0;
   }
}

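/**
 * Ask the DRI2 loader for the current front/back/depth/stencil buffers of
 * the drawable, then (re)attach the returned regions to our renderbuffers.
 * Buffers whose kernel handle is unchanged are left alone.
 */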
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct intel_framebuffer *intel_fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   struct intel_region *region, *depth_region;
   struct intel_context *intel = context->driverPrivate;
   __DRIbuffer *buffers = NULL;
   __DRIscreen *screen;
   int i, count;
   unsigned int attachments[10];
   uint32_t name;
   const char *region_name;

   if (INTEL_DEBUG & DEBUG_DRI)
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   screen = intel->intelScreen->driScrnPriv;

   if (screen->dri2.loader
       && (screen->dri2.loader->base.version > 2)
       && (screen->dri2.loader->getBuffersWithFormat != NULL)) {
      struct intel_renderbuffer *depth_rb;
      struct intel_renderbuffer *stencil_rb;

      i = 0;
      if ((intel->is_front_buffer_rendering || !intel_fb->color_rb[1])
          && intel_fb->color_rb[0]) {
         attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
         attachments[i++] = intel_bits_per_pixel(intel_fb->color_rb[0]);
      }

      if (intel_fb->color_rb[1]) {
         attachments[i++] = __DRI_BUFFER_BACK_LEFT;
         attachments[i++] = intel_bits_per_pixel(intel_fb->color_rb[1]);
      }

      depth_rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
      stencil_rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

      if ((depth_rb != NULL) && (stencil_rb != NULL)) {
         attachments[i++] = __DRI_BUFFER_DEPTH_STENCIL;
         attachments[i++] = intel_bits_per_pixel(depth_rb);
      } else if (depth_rb != NULL) {
         attachments[i++] = __DRI_BUFFER_DEPTH;
         attachments[i++] = intel_bits_per_pixel(depth_rb);
      } else if (stencil_rb != NULL) {
         attachments[i++] = __DRI_BUFFER_STENCIL;
         attachments[i++] = intel_bits_per_pixel(stencil_rb);
      }

      buffers =
         (*screen->dri2.loader->getBuffersWithFormat)(drawable,
                                                      &drawable->w,
                                                      &drawable->h,
                                                      attachments, i / 2,
                                                      &count,
                                                      drawable->loaderPrivate);
   } else if (screen->dri2.loader) {
      i = 0;
      if (intel_fb->color_rb[0])
         attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      if (intel_fb->color_rb[1])
         attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      if (intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH))
         attachments[i++] = __DRI_BUFFER_DEPTH;
      if (intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL))
         attachments[i++] = __DRI_BUFFER_STENCIL;

      buffers = (*screen->dri2.loader->getBuffers)(drawable,
                                                   &drawable->w,
                                                   &drawable->h,
                                                   attachments, i,
                                                   &count,
                                                   drawable->loaderPrivate);
   }

   if (buffers == NULL)
      return;

   drawable->x = 0;
   drawable->y = 0;
   drawable->backX = 0;
   drawable->backY = 0;
   drawable->numClipRects = 1;
   drawable->pClipRects[0].x1 = 0;
   drawable->pClipRects[0].y1 = 0;
   drawable->pClipRects[0].x2 = drawable->w;
   drawable->pClipRects[0].y2 = drawable->h;
   drawable->numBackClipRects = 1;
   drawable->pBackClipRects[0].x1 = 0;
   drawable->pBackClipRects[0].y1 = 0;
   drawable->pBackClipRects[0].x2 = drawable->w;
   drawable->pBackClipRects[0].y2 = drawable->h;

   depth_region = NULL;
   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_fb->color_rb[0];
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_fb->color_rb[0];
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_fb->color_rb[1];
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
         rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         region_name = "dri2 depth buffer";
         break;

      case __DRI_BUFFER_DEPTH_STENCIL:
         rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         region_name = "dri2 depth / stencil buffer";
         break;

      case __DRI_BUFFER_STENCIL:
         rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);
         region_name = "dri2 stencil buffer";
         break;

      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
328 "unhandled buffer attach event, attacment type %d\n",
                 buffers[i].attachment);
         return;
      }

      if (rb == NULL)
         continue;

      if (rb->region) {
         dri_bo_flink(rb->region->buffer, &name);
         if (name == buffers[i].name)
            continue;
      }

      if (INTEL_DEBUG & DEBUG_DRI)
         fprintf(stderr,
                 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
                 buffers[i].name, buffers[i].attachment,
                 buffers[i].cpp, buffers[i].pitch);

      if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_region) {
         if (INTEL_DEBUG & DEBUG_DRI)
            fprintf(stderr, "(reusing depth buffer as stencil)\n");
         intel_region_reference(&region, depth_region);
      }
      else
         region = intel_region_alloc_for_handle(intel, buffers[i].cpp,
                                                drawable->w,
                                                drawable->h,
                                                buffers[i].pitch / buffers[i].cpp,
                                                buffers[i].name,
                                                region_name);

      if (buffers[i].attachment == __DRI_BUFFER_DEPTH)
         depth_region = region;

      intel_renderbuffer_set_region(rb, region);
      intel_region_release(&region);

      if (buffers[i].attachment == __DRI_BUFFER_DEPTH_STENCIL) {
         rb = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);
         if (rb != NULL) {
            struct intel_region *stencil_region = NULL;

            if (rb->region) {
               dri_bo_flink(rb->region->buffer, &name);
               if (name == buffers[i].name)
                  continue;
            }

            intel_region_reference(&stencil_region, region);
            intel_renderbuffer_set_region(rb, stencil_region);
            intel_region_release(&stencil_region);
         }
      }
   }

   driUpdateFramebufferSize(&intel->ctx, drawable);
}

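/**
 * ctx->Driver.Viewport hook.  Under DRI2 a viewport change is treated as a
 * hint that the window may have moved or been resized, so the drawable's
 * renderbuffers are revalidated here before updating the drawing bounds.
 */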
void
intel_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
{
   struct intel_context *intel = intel_context(ctx);
   __DRIcontext *driContext = intel->driContext;
   void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
                        GLsizei w, GLsizei h);

   if (!driContext->driScreenPriv->dri2.enabled)
      return;

   if (!intel->internal_viewport_call && ctx->DrawBuffer->Name == 0) {
      /* If we're rendering to the fake front buffer, make sure all the pending
       * drawing has landed on the real front buffer.  Otherwise when we
       * eventually get to DRI2GetBuffersWithFormat the stale real front
       * buffer contents will get copied to the new fake front buffer.
       */
      if (intel->is_front_buffer_rendering) {
         intel_flush(ctx, GL_FALSE);
      }

      intel_update_renderbuffers(driContext, driContext->driDrawablePriv);
      if (driContext->driDrawablePriv != driContext->driReadablePriv)
         intel_update_renderbuffers(driContext, driContext->driReadablePriv);
   }

   old_viewport = ctx->Driver.Viewport;
   ctx->Driver.Viewport = NULL;
   intel->driDrawable = driContext->driDrawablePriv;
   intelWindowMoved(intel);
   intel_draw_buffer(ctx, intel->ctx.DrawBuffer);
   ctx->Driver.Viewport = old_viewport;
}


static const struct dri_debug_control debug_control[] = {
   { "tex", DEBUG_TEXTURE},
   { "state", DEBUG_STATE},
   { "ioctl", DEBUG_IOCTL},
   { "blit", DEBUG_BLIT},
   { "mip", DEBUG_MIPTREE},
   { "fall", DEBUG_FALLBACKS},
   { "verb", DEBUG_VERBOSE},
   { "bat", DEBUG_BATCH},
   { "pix", DEBUG_PIXEL},
   { "buf", DEBUG_BUFMGR},
   { "reg", DEBUG_REGION},
   { "fbo", DEBUG_FBO},
   { "lock", DEBUG_LOCK},
   { "sync", DEBUG_SYNC},
   { "prim", DEBUG_PRIMS },
   { "vert", DEBUG_VERTS },
   { "dri", DEBUG_DRI },
   { "dma", DEBUG_DMA },
   { "san", DEBUG_SANITY },
   { "sleep", DEBUG_SLEEP },
   { "stats", DEBUG_STATS },
   { "tile", DEBUG_TILE },
   { "sing", DEBUG_SINGLE_THREAD },
   { "thre", DEBUG_SINGLE_THREAD },
   { "wm", DEBUG_WM },
   { "urb", DEBUG_URB },
   { "vs", DEBUG_VS },
   { NULL, 0 }
};


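/**
 * ctx->Driver.UpdateState hook: pass the new state bits on to the helper
 * modules and accumulate them in intel->NewGLState for the next draw.
 */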
static void
intelInvalidateState(GLcontext * ctx, GLuint new_state)
{
   struct intel_context *intel = intel_context(ctx);

   _swrast_InvalidateState(ctx, new_state);
   _swsetup_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);
   _tnl_InvalidateState(ctx, new_state);
   _tnl_invalidate_vertex_state(ctx, new_state);

   intel->NewGLState |= new_state;

   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}

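/**
 * Flush any pending rendering to the kernel.  When needs_mi_flush is set,
 * an MI_FLUSH is emitted first so that frontbuffer rendering lands on
 * screen; dirty frontbuffer state is also reported to the DRI2 loader.
 */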
static void
intel_flush(GLcontext *ctx, GLboolean needs_mi_flush)
{
   struct intel_context *intel = intel_context(ctx);

   if (intel->Fallback)
      _swrast_flush(ctx);

   if (!IS_965(intel->intelScreen->deviceID))
      INTEL_FIREVERTICES(intel);

   /* Emit a flush so that any frontbuffer rendering that might have occurred
    * lands onscreen in a timely manner, even if the X Server doesn't trigger
    * a flush for us.
    */
   if (needs_mi_flush)
      intel_batchbuffer_emit_mi_flush(intel->batch);

   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);

   if ((ctx->DrawBuffer->Name == 0) && intel->front_buffer_dirty) {
      __DRIscreen *const screen = intel->intelScreen->driScrnPriv;

      if (screen->dri2.loader &&
          (screen->dri2.loader->base.version >= 2)
          && (screen->dri2.loader->flushFrontBuffer != NULL)) {
         (*screen->dri2.loader->flushFrontBuffer)(intel->driDrawable,
                                                  intel->driDrawable->loaderPrivate);

         /* Only clear the dirty bit if front-buffer rendering is no longer
          * enabled.  This is done so that the dirty bit can only be set in
          * glDrawBuffer.  Otherwise the dirty bit would have to be set at
          * each of N places that do rendering.  This has worse performance,
          * but it is much easier to get correct.
          */
         if (intel->is_front_buffer_rendering) {
            intel->front_buffer_dirty = GL_FALSE;
         }
      }
   }
}

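/**
 * intelFlush() is the driver-internal flush (no MI_FLUSH forced), while
 * intel_glFlush() below backs ctx->Driver.Flush and does emit one.
 */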
void
intelFlush(GLcontext * ctx)
{
   intel_flush(ctx, GL_FALSE);
}

static void
intel_glFlush(GLcontext *ctx)
{
   intel_flush(ctx, GL_TRUE);
}

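/**
 * ctx->Driver.Finish hook: flush, then wait for rendering to the bound
 * color buffers to complete.
 */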
void
intelFinish(GLcontext * ctx)
{
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   int i;

   intelFlush(ctx);

   for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb;

      irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->region)
         dri_bo_wait_rendering(irb->region->buffer);
   }
   if (fb->_DepthBuffer) {
      /* XXX: Wait on buffer idle */
   }
}

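/**
 * Plug the driver's hooks into a dd_function_table, starting from the
 * default (software) implementations.
 */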
void
intelInitDriverFunctions(struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   functions->Flush = intel_glFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   functions->CopyColorTable = _swrast_CopyColorTable;
   functions->CopyColorSubTable = _swrast_CopyColorSubTable;
   functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
   functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;

   intelInitTextureFuncs(functions);
   intelInitStateFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
}


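/**
 * Shared context-creation code for the i915 and i965 drivers: sets up the
 * Mesa context, driconf options, buffer manager, limits, helper modules
 * and debug flags.  Returns GL_FALSE on failure.
 */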
GLboolean
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontextPrivate * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
   int fthrottle_mode;

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = intelScreen->sarea;
   intel->driContext = driContextPriv;

   /* Dri stuff */
   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = sPriv->lock;

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum,
                       IS_965(intelScreen->deviceID) ? "i965" : "i915");
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
   else
      intel->maxBatchSize = BATCH_SZ;

   intel->bufmgr = intelScreen->bufmgr;
   intel->ttm = intelScreen->ttm;
   if (intel->ttm) {
      int bo_reuse_mode;

      bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
      switch (bo_reuse_mode) {
      case DRI_CONF_BO_REUSE_DISABLED:
         break;
      case DRI_CONF_BO_REUSE_ALL:
         intel_bufmgr_gem_enable_reuse(intel->bufmgr);
         break;
      }
   }

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start.
    */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      unsigned int value = atoi(getenv("INTEL_STRICT_CONFORMANCE"));
      if (value > 0) {
         intel->conformance_mode = value;
      }
      else {
         intel->conformance_mode = 1;
      }
   }

   if (intel->conformance_mode > 0) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* Reinitialize the context point state.  It depends on the constants
    * in __GLcontextRec::Const.
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   switch (mesaVis->depthBits) {
   case 0:                      /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0;     /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   if (IS_965(intelScreen->deviceID))
      intel->polygon_offset_scale /= 0xffff;

   intel->RenderIndex = ~0;

   fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
   intel->irqsEmitted = 0;

   intel->do_irqs = (intel->intelScreen->irq_active &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   if (IS_965(intelScreen->deviceID) && !intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active. Exiting\n");
      exit(1);
   }

   intelInitExtensions(ctx, GL_FALSE);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   if (!sPriv->dri2.enabled)
      intel_recreate_static_regions(intel);

   intel->batch = intel_batchbuffer_alloc(intel);

   intel_bufferobj_init(intel);
   intel_fbo_init(intel);

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   }
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
   }

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      intel->always_flush_batch = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      intel->always_flush_cache = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}

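/**
 * Tear down a context: destroy the hardware-specific state via the vtbl,
 * shut down the helper modules, and release the batchbuffer, regions and
 * option cache before freeing the Mesa context data.
 */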
void
intelDestroyContext(__DRIcontextPrivate * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      INTEL_FIREVERTICES(intel);

      if (intel->clear.arrayObj)
         _mesa_delete_array_object(&intel->ctx, intel->clear.arrayObj);

      intel->vtbl.destroy(intel);

      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);
      intel->batch = NULL;

      free(intel->prim.vb);
      intel->prim.vb = NULL;
      dri_bo_unreference(intel->prim.vb_bo);
      intel->prim.vb_bo = NULL;

      if (release_texture_heaps) {
         /* This share group is about to go away; free our private
          * texture object data.
          */
         if (INTEL_DEBUG & DEBUG_TEXTURE)
            fprintf(stderr, "do something to free texture heaps\n");
      }

      intel_region_release(&intel->front_region);
      intel_region_release(&intel->back_region);
      intel_region_release(&intel->depth_region);

      driDestroyOptionCache(&intel->optionCache);

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);
   }
}

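/**
 * Called when the context is unbound from its drawable; nothing to do
 * here beyond reporting success.
 */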
GLboolean
intelUnbindContext(__DRIcontextPrivate * driContextPriv)
{
   return GL_TRUE;
}

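/**
 * Bind the context to the given draw/read drawables, revalidating their
 * buffers (DRI2) or static regions (DRI1) and setting up vblank state the
 * first time a drawable is seen.
 */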
GLboolean
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
                 __DRIdrawablePrivate * driDrawPriv,
                 __DRIdrawablePrivate * driReadPriv)
{
   __DRIscreenPrivate *psp = driDrawPriv->driScreenPriv;

   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
         (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;

      if (driContextPriv->driScreenPriv->dri2.enabled) {
         intel_update_renderbuffers(driContextPriv, driDrawPriv);
         if (driDrawPriv != driReadPriv)
            intel_update_renderbuffers(driContextPriv, driReadPriv);
      } else {
         /* XXX FBO temporary fix-ups! */
         /* if the renderbuffers don't have regions, init them from the context */
         struct intel_renderbuffer *irbDepth
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         struct intel_renderbuffer *irbStencil
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

         if (intel_fb->color_rb[0]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[0],
                                          intel->front_region);
         }
         if (intel_fb->color_rb[1]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[1],
                                          intel->back_region);
         }

         if (irbDepth) {
            intel_renderbuffer_set_region(irbDepth, intel->depth_region);
         }
         if (irbStencil) {
            intel_renderbuffer_set_region(irbStencil, intel->depth_region);
         }
      }

      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (driReadPriv != driDrawPriv) {
         driUpdateFramebufferSize(&intel->ctx, driReadPriv);
      }

      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);

      /* The drawbuffer won't always be updated by _mesa_make_current:
       */
      if (intel->ctx.DrawBuffer == &intel_fb->Base) {

         if (intel->driReadDrawable != driReadPriv)
            intel->driReadDrawable = driReadPriv;

         if (intel->driDrawable != driDrawPriv) {
            if (driDrawPriv->swap_interval == (unsigned)-1) {
               int i;

               driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
                  ? driGetDefaultVBlankFlags(&intel->optionCache)
                  : VBLANK_FLAG_NO_IRQ;

               /* Prevent an error printf if one CRTC is disabled; this will
                * be properly calculated in intelWindowMoved() next.
                */
               driDrawPriv->vblFlags = intelFixupVblank(intel, driDrawPriv);

               (*psp->systemTime->getUST) (&intel_fb->swap_ust);
               driDrawableInitVBlank(driDrawPriv);
               intel_fb->vbl_waited = driDrawPriv->vblSeq;

               for (i = 0; i < 2; i++) {
                  if (intel_fb->color_rb[i])
                     intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;
               }
            }
            intel->driDrawable = driDrawPriv;
            intelWindowMoved(intel);
         }

         intel_draw_buffer(&intel->ctx, &intel_fb->Base);
      }
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}

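/**
 * Called when DRM_CAS in LOCK_HARDWARE loses the race for the hardware
 * lock: grab the lock, revalidate drawable info and reclaim sarea/texture
 * state from whichever context held it last.
 */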
static void
intelContendedLock(struct intel_context *intel, GLuint flags)
{
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   volatile drm_i915_sarea_t *sarea = intel->sarea;
   int me = intel->hHWContext;

   drmGetLock(intel->driFd, intel->hHWContext, flags);
   intel->locked = 1;

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - got contended lock\n", __progname);

   /* If the window moved, we may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call:
    */
   if (dPriv)
      DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);

   if (sarea && sarea->ctxOwner != me) {
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
         fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
                 sarea->ctxOwner, me);
      }
      sarea->ctxOwner = me;
   }

   /* If the last consumer of the texture memory wasn't us, notify the fake
    * bufmgr and record the new owner.  We should have the memory shared
    * between contexts of a single fake bufmgr, but this will at least make
    * things correct for now.
    */
   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
      sarea->texAge = intel->hHWContext;
      intel_bufmgr_fake_contended_lock_take(intel->bufmgr);
      if (INTEL_DEBUG & DEBUG_BATCH)
         intel_decode_context_reset();
      if (INTEL_DEBUG & DEBUG_BUFMGR)
         fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
                 sarea->texAge, intel->hHWContext);
   }

   /* Drawable changed?
    */
   if (dPriv && intel->lastStamp != dPriv->lastStamp) {
      intelWindowMoved(intel);
      intel->lastStamp = dPriv->lastStamp;
   }
}


_glthread_DECLARE_STATIC_MUTEX(lockMutex);

/* Lock the hardware and validate our state.
 */
void LOCK_HARDWARE( struct intel_context *intel )
{
   __DRIdrawable *dPriv = intel->driDrawable;
   __DRIscreen *sPriv = intel->driScreen;
   char __ret = 0;
   struct intel_framebuffer *intel_fb = NULL;
   struct intel_renderbuffer *intel_rb = NULL;

   _glthread_LOCK_MUTEX(lockMutex);
   assert(!intel->locked);
   intel->locked = 1;

   if (intel->driDrawable) {
      intel_fb = intel->driDrawable->driverPrivate;

      if (intel_fb)
         intel_rb =
            intel_get_renderbuffer(&intel_fb->Base,
                                   intel_fb->Base._ColorDrawBufferIndexes[0]);
   }

   if (intel_rb && dPriv->vblFlags &&
       !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
       (intel_fb->vbl_waited - intel_rb->vbl_pending) > (1<<23)) {
      drmVBlank vbl;

      vbl.request.type = DRM_VBLANK_ABSOLUTE;

      if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
         vbl.request.type |= DRM_VBLANK_SECONDARY;
      }

      vbl.request.sequence = intel_rb->vbl_pending;
      drmWaitVBlank(intel->driFd, &vbl);
      intel_fb->vbl_waited = vbl.reply.sequence;
   }

   if (!sPriv->dri2.enabled) {
      DRM_CAS(intel->driHwLock, intel->hHWContext,
              (DRM_LOCK_HELD|intel->hHWContext), __ret);

      if (__ret)
         intelContendedLock( intel, 0 );
   }

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - locked\n", __progname);
}


/* Unlock the hardware using the global current context
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   __DRIscreen *sPriv = intel->driScreen;

   intel->vtbl.note_unlock( intel );
   intel->locked = 0;

   if (!sPriv->dri2.enabled)
      DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);

   _glthread_UNLOCK_MUTEX(lockMutex);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - unlocked\n", __progname);

   /* Nothing that references cliprects should be left in the batch
    * outside of LOCK/UNLOCK.
    */
   if (intel->batch->cliprect_mode == REFERENCES_CLIPRECTS)
      intel_batchbuffer_flush(intel->batch);
}