intel: Only flush fake front buffer on API level glFlush()
[mesa.git] / src/mesa/drivers/dri/intel/intel_context.c
1 /**************************************************************************
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "main/glheader.h"
30 #include "main/context.h"
31 #include "main/extensions.h"
32 #include "main/framebuffer.h"
33 #include "main/imports.h"
34 #include "main/points.h"
35
36 #include "swrast/swrast.h"
37 #include "swrast_setup/swrast_setup.h"
38 #include "tnl/tnl.h"
39 #include "drivers/common/driverfuncs.h"
40 #include "drivers/common/meta.h"
41
42 #include "i830_dri.h"
43
44 #include "intel_chipset.h"
45 #include "intel_buffers.h"
46 #include "intel_tex.h"
47 #include "intel_batchbuffer.h"
48 #include "intel_clear.h"
49 #include "intel_extensions.h"
50 #include "intel_pixel.h"
51 #include "intel_regions.h"
52 #include "intel_buffer_objects.h"
53 #include "intel_fbo.h"
54 #include "intel_bufmgr.h"
55 #include "intel_screen.h"
56
57 #include "drirenderbuffer.h"
58 #include "utils.h"
59
60
61 #ifndef INTEL_DEBUG
62 int INTEL_DEBUG = (0);
63 #endif
64
65
66 #define DRIVER_DATE "20100330 DEVELOPMENT"
67 #define DRIVER_DATE_GEM "GEM " DRIVER_DATE
68
69
70 static const GLubyte *
71 intelGetString(GLcontext * ctx, GLenum name)
72 {
73 const struct intel_context *const intel = intel_context(ctx);
74 const char *chipset;
75 static char buffer[128];
76
77 switch (name) {
78 case GL_VENDOR:
79 return (GLubyte *) "Tungsten Graphics, Inc";
80 break;
81
82 case GL_RENDERER:
83 switch (intel->intelScreen->deviceID) {
84 case PCI_CHIP_845_G:
85 chipset = "Intel(R) 845G";
86 break;
87 case PCI_CHIP_I830_M:
88 chipset = "Intel(R) 830M";
89 break;
90 case PCI_CHIP_I855_GM:
91 chipset = "Intel(R) 852GM/855GM";
92 break;
93 case PCI_CHIP_I865_G:
94 chipset = "Intel(R) 865G";
95 break;
96 case PCI_CHIP_I915_G:
97 chipset = "Intel(R) 915G";
98 break;
99 case PCI_CHIP_E7221_G:
100 chipset = "Intel (R) E7221G (i915)";
101 break;
102 case PCI_CHIP_I915_GM:
103 chipset = "Intel(R) 915GM";
104 break;
105 case PCI_CHIP_I945_G:
106 chipset = "Intel(R) 945G";
107 break;
108 case PCI_CHIP_I945_GM:
109 chipset = "Intel(R) 945GM";
110 break;
111 case PCI_CHIP_I945_GME:
112 chipset = "Intel(R) 945GME";
113 break;
114 case PCI_CHIP_G33_G:
115 chipset = "Intel(R) G33";
116 break;
117 case PCI_CHIP_Q35_G:
118 chipset = "Intel(R) Q35";
119 break;
120 case PCI_CHIP_Q33_G:
121 chipset = "Intel(R) Q33";
122 break;
123 case PCI_CHIP_IGD_GM:
124 case PCI_CHIP_IGD_G:
125 chipset = "Intel(R) IGD";
126 break;
127 case PCI_CHIP_I965_Q:
128 chipset = "Intel(R) 965Q";
129 break;
130 case PCI_CHIP_I965_G:
131 case PCI_CHIP_I965_G_1:
132 chipset = "Intel(R) 965G";
133 break;
134 case PCI_CHIP_I946_GZ:
135 chipset = "Intel(R) 946GZ";
136 break;
137 case PCI_CHIP_I965_GM:
138 chipset = "Intel(R) 965GM";
139 break;
140 case PCI_CHIP_I965_GME:
141 chipset = "Intel(R) 965GME/GLE";
142 break;
143 case PCI_CHIP_GM45_GM:
144 chipset = "Mobile Intel® GM45 Express Chipset";
145 break;
146 case PCI_CHIP_IGD_E_G:
147 chipset = "Intel(R) Integrated Graphics Device";
148 break;
149 case PCI_CHIP_G45_G:
150 chipset = "Intel(R) G45/G43";
151 break;
152 case PCI_CHIP_Q45_G:
153 chipset = "Intel(R) Q45/Q43";
154 break;
155 case PCI_CHIP_G41_G:
156 chipset = "Intel(R) G41";
157 break;
158 case PCI_CHIP_B43_G:
159 chipset = "Intel(R) B43";
160 break;
161 case PCI_CHIP_ILD_G:
162 chipset = "Intel(R) Ironlake Desktop";
163 break;
164 case PCI_CHIP_ILM_G:
165 chipset = "Intel(R) Ironlake Mobile";
166 break;
167 default:
168 chipset = "Unknown Intel Chipset";
169 break;
170 }
171
172 (void) driGetRendererString(buffer, chipset, DRIVER_DATE_GEM, 0);
173 return (GLubyte *) buffer;
174
175 default:
176 return NULL;
177 }
178 }
179
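/* Flush the contents of the fake front buffer out to the real front
 * buffer via the DRI2 loader's flushFrontBuffer hook, if front-buffer
 * rendering has left it dirty.  Called from intel_glFlush() and from
 * intel_update_renderbuffers().
 */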
180 static void
181 intel_flush_front(GLcontext *ctx)
182 {
183 struct intel_context *intel = intel_context(ctx);
184 __DRIcontext *driContext = intel->driContext;
185 __DRIscreen *const screen = intel->intelScreen->driScrnPriv;
186
187 if ((ctx->DrawBuffer->Name == 0) && intel->front_buffer_dirty) {
188 if (screen->dri2.loader &&
189 (screen->dri2.loader->base.version >= 2)
190 && (screen->dri2.loader->flushFrontBuffer != NULL) &&
191 driContext->driDrawablePriv &&
192 driContext->driDrawablePriv->loaderPrivate) {
193 (*screen->dri2.loader->flushFrontBuffer)(driContext->driDrawablePriv,
194 driContext->driDrawablePriv->loaderPrivate);
195
196 /* The dirty bit gets set again in intel_prepare_render() if we're
197 * still doing front-buffer rendering once we get there.
198 */
199 intel->front_buffer_dirty = GL_FALSE;
200 }
201 }
202 }
203
204 static unsigned
205 intel_bits_per_pixel(const struct intel_renderbuffer *rb)
206 {
207 return _mesa_get_format_bytes(rb->Base.Format) * 8;
208 }
209
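/* Ask the DRI2 loader for the current set of buffers backing the given
 * drawable and (re)attach the returned regions to the framebuffer's
 * renderbuffers.  Pending front-buffer rendering is flushed first so
 * stale contents don't get copied into a new fake front buffer.
 */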
210 void
211 intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
212 {
213 struct gl_framebuffer *fb = drawable->driverPrivate;
214 struct intel_renderbuffer *rb;
215 struct intel_region *region, *depth_region;
216 struct intel_context *intel = context->driverPrivate;
217 struct intel_renderbuffer *front_rb, *back_rb, *depth_rb, *stencil_rb;
218 __DRIbuffer *buffers = NULL;
219 __DRIscreen *screen;
220 int i, count;
221 unsigned int attachments[10];
222 const char *region_name;
223
224 /* If we're rendering to the fake front buffer, make sure all the
225 * pending drawing has landed on the real front buffer. Otherwise
226 * when we eventually get to DRI2GetBuffersWithFormat the stale
227 * real front buffer contents will get copied to the new fake front
228 * buffer.
229 */
230 if (intel->is_front_buffer_rendering) {
231 intel_flush(&intel->ctx, GL_FALSE);
232 intel_flush_front(&intel->ctx);
233 }
234
235 /* Set this up front, so that in case our buffers get invalidated
236 * while we're getting new buffers, we don't clobber the stamp and
237 * thus ignore the invalidate. */
238 drawable->lastStamp = drawable->dri2.stamp;
239
240 if (INTEL_DEBUG & DEBUG_DRI)
241 fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
242
243 screen = intel->intelScreen->driScrnPriv;
244
245 if (screen->dri2.loader
246 && (screen->dri2.loader->base.version > 2)
247 && (screen->dri2.loader->getBuffersWithFormat != NULL)) {
248
249 front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
250 back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
251 depth_rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
252 stencil_rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
253
254 i = 0;
255 if ((intel->is_front_buffer_rendering ||
256 intel->is_front_buffer_reading ||
257 !back_rb) && front_rb) {
258 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
259 attachments[i++] = intel_bits_per_pixel(front_rb);
260 }
261
262 if (back_rb) {
263 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
264 attachments[i++] = intel_bits_per_pixel(back_rb);
265 }
266
267 if ((depth_rb != NULL) && (stencil_rb != NULL)) {
268 attachments[i++] = __DRI_BUFFER_DEPTH_STENCIL;
269 attachments[i++] = intel_bits_per_pixel(depth_rb);
270 } else if (depth_rb != NULL) {
271 attachments[i++] = __DRI_BUFFER_DEPTH;
272 attachments[i++] = intel_bits_per_pixel(depth_rb);
273 } else if (stencil_rb != NULL) {
274 attachments[i++] = __DRI_BUFFER_STENCIL;
275 attachments[i++] = intel_bits_per_pixel(stencil_rb);
276 }
277
278 buffers =
279 (*screen->dri2.loader->getBuffersWithFormat)(drawable,
280 &drawable->w,
281 &drawable->h,
282 attachments, i / 2,
283 &count,
284 drawable->loaderPrivate);
285 } else if (screen->dri2.loader) {
286 i = 0;
287 if (intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT))
288 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
289 if (intel_get_renderbuffer(fb, BUFFER_BACK_LEFT))
290 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
291 if (intel_get_renderbuffer(fb, BUFFER_DEPTH))
292 attachments[i++] = __DRI_BUFFER_DEPTH;
293 if (intel_get_renderbuffer(fb, BUFFER_STENCIL))
294 attachments[i++] = __DRI_BUFFER_STENCIL;
295
296 buffers = (*screen->dri2.loader->getBuffers)(drawable,
297 &drawable->w,
298 &drawable->h,
299 attachments, i,
300 &count,
301 drawable->loaderPrivate);
302 }
303
304 if (buffers == NULL)
305 return;
306
307 drawable->x = 0;
308 drawable->y = 0;
309 drawable->backX = 0;
310 drawable->backY = 0;
311 drawable->numClipRects = 1;
312 drawable->pClipRects[0].x1 = 0;
313 drawable->pClipRects[0].y1 = 0;
314 drawable->pClipRects[0].x2 = drawable->w;
315 drawable->pClipRects[0].y2 = drawable->h;
316 drawable->numBackClipRects = 1;
317 drawable->pBackClipRects[0].x1 = 0;
318 drawable->pBackClipRects[0].y1 = 0;
319 drawable->pBackClipRects[0].x2 = drawable->w;
320 drawable->pBackClipRects[0].y2 = drawable->h;
321
322 depth_region = NULL;
323 for (i = 0; i < count; i++) {
324 switch (buffers[i].attachment) {
325 case __DRI_BUFFER_FRONT_LEFT:
326 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
327 region_name = "dri2 front buffer";
328 break;
329
330 case __DRI_BUFFER_FAKE_FRONT_LEFT:
331 rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
332 region_name = "dri2 fake front buffer";
333 break;
334
335 case __DRI_BUFFER_BACK_LEFT:
336 rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
337 region_name = "dri2 back buffer";
338 break;
339
340 case __DRI_BUFFER_DEPTH:
341 rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
342 region_name = "dri2 depth buffer";
343 break;
344
345 case __DRI_BUFFER_DEPTH_STENCIL:
346 rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
347 region_name = "dri2 depth / stencil buffer";
348 break;
349
350 case __DRI_BUFFER_STENCIL:
351 rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
352 region_name = "dri2 stencil buffer";
353 break;
354
355 case __DRI_BUFFER_ACCUM:
356 default:
357 fprintf(stderr,
358 "unhandled buffer attach event, attacment type %d\n",
359 buffers[i].attachment);
360 return;
361 }
362
363 if (rb == NULL)
364 continue;
365
366 if (rb->region && rb->region->name == buffers[i].name)
367 continue;
368
369 if (INTEL_DEBUG & DEBUG_DRI)
370 fprintf(stderr,
371 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
372 buffers[i].name, buffers[i].attachment,
373 buffers[i].cpp, buffers[i].pitch);
374
375 if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_region) {
376 if (INTEL_DEBUG & DEBUG_DRI)
377 fprintf(stderr, "(reusing depth buffer as stencil)\n");
378 intel_region_reference(&region, depth_region);
379 }
380 else
381 region = intel_region_alloc_for_handle(intel, buffers[i].cpp,
382 drawable->w,
383 drawable->h,
384 buffers[i].pitch / buffers[i].cpp,
385 buffers[i].name,
386 region_name);
387
388 if (buffers[i].attachment == __DRI_BUFFER_DEPTH)
389 depth_region = region;
390
391 intel_renderbuffer_set_region(intel, rb, region);
392 intel_region_release(&region);
393
394 if (buffers[i].attachment == __DRI_BUFFER_DEPTH_STENCIL) {
395 rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
396 if (rb != NULL) {
397 struct intel_region *stencil_region = NULL;
398
399 if (rb->region && rb->region->name == buffers[i].name)
400 continue;
401
402 intel_region_reference(&stencil_region, region);
403 intel_renderbuffer_set_region(intel, rb, stencil_region);
404 intel_region_release(&stencil_region);
405 }
406 }
407 }
408
409 driUpdateFramebufferSize(&intel->ctx, drawable);
410 }
411
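/* Make sure the renderbuffers match the current DRI2 buffers before
 * drawing.  The drawable invalidate stamps are used to avoid redundant
 * round trips to the loader, and the front buffer is marked dirty when
 * front-buffer rendering is active.
 */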
412 void
413 intel_prepare_render(struct intel_context *intel)
414 {
415 __DRIcontext *driContext = intel->driContext;
416 __DRIdrawable *drawable;
417
418 drawable = driContext->driDrawablePriv;
419 if (drawable->dri2.stamp != driContext->dri2.draw_stamp) {
420 if (drawable->lastStamp != drawable->dri2.stamp)
421 intel_update_renderbuffers(driContext, drawable);
422 intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
423 driContext->dri2.draw_stamp = drawable->dri2.stamp;
424 }
425
426 drawable = driContext->driReadablePriv;
427 if (drawable->dri2.stamp != driContext->dri2.read_stamp) {
428 if (drawable->lastStamp != drawable->dri2.stamp)
429 intel_update_renderbuffers(driContext, drawable);
430 driContext->dri2.read_stamp = drawable->dri2.stamp;
431 }
432
433 /* If we're currently rendering to the front buffer, the rendering
434 * that will happen next will probably dirty the front buffer. So
435 * mark it as dirty here.
436 */
437 if (intel->is_front_buffer_rendering)
438 intel->front_buffer_dirty = GL_TRUE;
439 }
440
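/* A viewport change often follows a window resize, so use it as a hint
 * to invalidate the drawables and pick up fresh buffers.  Skipped when
 * the DRI2 SwapBuffers path is in use, for internal meta-ops viewport
 * calls, and when a user FBO is bound.
 */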
441 void
442 intel_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
443 {
444 struct intel_context *intel = intel_context(ctx);
445 __DRIcontext *driContext = intel->driContext;
446
447 if (!intel->using_dri2_swapbuffers &&
448 !intel->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
449 dri2InvalidateDrawable(driContext->driDrawablePriv);
450 dri2InvalidateDrawable(driContext->driReadablePriv);
451 }
452 }
453
454 static const struct dri_debug_control debug_control[] = {
455 { "tex", DEBUG_TEXTURE},
456 { "state", DEBUG_STATE},
457 { "ioctl", DEBUG_IOCTL},
458 { "blit", DEBUG_BLIT},
459 { "mip", DEBUG_MIPTREE},
460 { "fall", DEBUG_FALLBACKS},
461 { "verb", DEBUG_VERBOSE},
462 { "bat", DEBUG_BATCH},
463 { "pix", DEBUG_PIXEL},
464 { "buf", DEBUG_BUFMGR},
465 { "reg", DEBUG_REGION},
466 { "fbo", DEBUG_FBO},
467 { "lock", DEBUG_LOCK},
468 { "sync", DEBUG_SYNC},
469 { "prim", DEBUG_PRIMS },
470 { "vert", DEBUG_VERTS },
471 { "dri", DEBUG_DRI },
472 { "dma", DEBUG_DMA },
473 { "san", DEBUG_SANITY },
474 { "sleep", DEBUG_SLEEP },
475 { "stats", DEBUG_STATS },
476 { "tile", DEBUG_TILE },
477 { "sing", DEBUG_SINGLE_THREAD },
478 { "thre", DEBUG_SINGLE_THREAD },
479 { "wm", DEBUG_WM },
480 { "glsl_force", DEBUG_GLSL_FORCE },
481 { "urb", DEBUG_URB },
482 { "vs", DEBUG_VS },
483 { NULL, 0 }
484 };
485
486
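/* Mesa state-change notification: invalidate the derived state of the
 * software rasterizer and TNL helper modules, then let the hardware
 * backend do the same through its vtbl hook.
 */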
487 static void
488 intelInvalidateState(GLcontext * ctx, GLuint new_state)
489 {
490 struct intel_context *intel = intel_context(ctx);
491
492 _swrast_InvalidateState(ctx, new_state);
493 _swsetup_InvalidateState(ctx, new_state);
494 _vbo_InvalidateState(ctx, new_state);
495 _tnl_InvalidateState(ctx, new_state);
496 _tnl_invalidate_vertex_state(ctx, new_state);
497
498 intel->NewGLState |= new_state;
499
500 if (intel->vtbl.invalidate_state)
501 intel->vtbl.invalidate_state( intel, new_state );
502 }
503
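/* Flush any in-progress software fallback rendering and queued
 * hardware commands out to the kernel via the batchbuffer.
 */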
504 void
505 intel_flush(GLcontext *ctx, GLboolean needs_mi_flush)
506 {
507 struct intel_context *intel = intel_context(ctx);
508
509 if (intel->Fallback)
510 _swrast_flush(ctx);
511
512 if (intel->gen < 4)
513 INTEL_FIREVERTICES(intel);
514
515 if (intel->batch->map != intel->batch->ptr)
516 intel_batchbuffer_flush(intel->batch);
517 }
518
519 void
520 intelFlush(GLcontext * ctx)
521 {
522 intel_flush(ctx, GL_FALSE);
523 }
524
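/* glFlush() entry point: flush hardware rendering, push any dirty fake
 * front buffer out to the real front buffer, and throttle against the
 * first batch emitted after the last SwapBuffers.
 */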
525 static void
526 intel_glFlush(GLcontext *ctx)
527 {
528 struct intel_context *intel = intel_context(ctx);
529
530 intel_flush(ctx, GL_TRUE);
531
532 intel_flush_front(ctx);
533
534 /* We're using glFlush as an indicator that a frame is done, which is
535 * what DRI2 does before calling SwapBuffers (and means we should catch
536 * people doing front-buffer rendering, as well).
537 *
538 * Wait for the swapbuffers before the one we just emitted, so we don't
539 * get too many swaps outstanding for apps that are GPU-heavy but not
540 * CPU-heavy.
541 *
542 * Unfortunately, we don't have a handle to the batch containing the swap,
543 * and getting our hands on that doesn't seem worth it, so we just use the
544 * first batch we emitted after the last swap.
545 */
546 if (!intel->using_dri2_swapbuffers &&
547 intel->first_post_swapbuffers_batch != NULL) {
548 drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
549 drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
550 intel->first_post_swapbuffers_batch = NULL;
551 }
552 }
553
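/* glFinish() entry point: flush everything, then block until rendering
 * to the bound color buffers has completed.
 */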
554 void
555 intelFinish(GLcontext * ctx)
556 {
557 struct gl_framebuffer *fb = ctx->DrawBuffer;
558 int i;
559
560 intelFlush(ctx);
561
562 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
563 struct intel_renderbuffer *irb;
564
565 irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);
566
567 if (irb && irb->region)
568 dri_bo_wait_rendering(irb->region->buffer);
569 }
570 if (fb->_DepthBuffer) {
571 /* XXX: Wait on buffer idle */
572 }
573 }
574
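/* Plug the shared Intel implementations of the core driver hooks
 * (Flush, Finish, GetString, UpdateState, texture/pixel/buffer
 * functions) into Mesa's dd_function_table.
 */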
575 void
576 intelInitDriverFunctions(struct dd_function_table *functions)
577 {
578 _mesa_init_driver_functions(functions);
579
580 functions->Flush = intel_glFlush;
581 functions->Finish = intelFinish;
582 functions->GetString = intelGetString;
583 functions->UpdateState = intelInvalidateState;
584
585 intelInitTextureFuncs(functions);
586 intelInitTextureImageFuncs(functions);
587 intelInitTextureSubImageFuncs(functions);
588 intelInitTextureCopyImageFuncs(functions);
589 intelInitStateFuncs(functions);
590 intelInitClearFuncs(functions);
591 intelInitBufferFuncs(functions);
592 intelInitPixelFuncs(functions);
593 intelInitBufferObjectFuncs(functions);
594 intel_init_syncobj_functions(functions);
595 }
596
597
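/* Common context creation for the Intel DRI drivers: create the Mesa
 * context, determine the hardware generation from the PCI device ID,
 * parse driconf options and debug flags, and set up the software
 * rasterizer, TNL and meta-ops helper modules.
 */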
598 GLboolean
599 intelInitContext(struct intel_context *intel,
600 int api,
601 const __GLcontextModes * mesaVis,
602 __DRIcontext * driContextPriv,
603 void *sharedContextPrivate,
604 struct dd_function_table *functions)
605 {
606 GLcontext *ctx = &intel->ctx;
607 GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
608 __DRIscreen *sPriv = driContextPriv->driScreenPriv;
609 struct intel_screen *intelScreen = sPriv->private;
610 int bo_reuse_mode;
611
612 /* we can't do anything without a connection to the device */
613 if (intelScreen->bufmgr == NULL)
614 return GL_FALSE;
615
616 if (!_mesa_initialize_context_for_api(&intel->ctx, api, mesaVis, shareCtx,
617 functions, (void *) intel)) {
618 printf("%s: failed to init mesa context\n", __FUNCTION__);
619 return GL_FALSE;
620 }
621
622 driContextPriv->driverPrivate = intel;
623 intel->intelScreen = intelScreen;
624 intel->driContext = driContextPriv;
625 intel->driFd = sPriv->fd;
626
627 if (IS_GEN6(intel->intelScreen->deviceID)) {
628 intel->gen = 6;
629 intel->needs_ff_sync = GL_TRUE;
630 intel->has_luminance_srgb = GL_TRUE;
631 } else if (IS_GEN5(intel->intelScreen->deviceID)) {
632 intel->gen = 5;
633 intel->needs_ff_sync = GL_TRUE;
634 intel->has_luminance_srgb = GL_TRUE;
635 } else if (IS_965(intel->intelScreen->deviceID)) {
636 intel->gen = 4;
637 if (IS_G4X(intel->intelScreen->deviceID)) {
638 intel->has_luminance_srgb = GL_TRUE;
639 intel->is_g4x = GL_TRUE;
640 }
641 } else if (IS_9XX(intel->intelScreen->deviceID)) {
642 intel->gen = 3;
643 if (IS_945(intel->intelScreen->deviceID)) {
644 intel->is_945 = GL_TRUE;
645 }
646 } else {
647 intel->gen = 2;
648 }
649
650 driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
651 sPriv->myNum, (intel->gen >= 4) ? "i965" : "i915");
652 if (intelScreen->deviceID == PCI_CHIP_I865_G)
653 intel->maxBatchSize = 4096;
654 else
655 intel->maxBatchSize = BATCH_SZ;
656
657 intel->bufmgr = intelScreen->bufmgr;
658
659 bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
660 switch (bo_reuse_mode) {
661 case DRI_CONF_BO_REUSE_DISABLED:
662 break;
663 case DRI_CONF_BO_REUSE_ALL:
664 intel_bufmgr_gem_enable_reuse(intel->bufmgr);
665 break;
666 }
667
668 /* This doesn't yet catch all non-conformant rendering, but it's a
669 * start.
670 */
671 if (getenv("INTEL_STRICT_CONFORMANCE")) {
672 unsigned int value = atoi(getenv("INTEL_STRICT_CONFORMANCE"));
673 if (value > 0) {
674 intel->conformance_mode = value;
675 }
676 else {
677 intel->conformance_mode = 1;
678 }
679 }
680
681 if (intel->conformance_mode > 0) {
682 ctx->Const.MinLineWidth = 1.0;
683 ctx->Const.MinLineWidthAA = 1.0;
684 ctx->Const.MaxLineWidth = 1.0;
685 ctx->Const.MaxLineWidthAA = 1.0;
686 ctx->Const.LineWidthGranularity = 1.0;
687 }
688 else {
689 ctx->Const.MinLineWidth = 1.0;
690 ctx->Const.MinLineWidthAA = 1.0;
691 ctx->Const.MaxLineWidth = 5.0;
692 ctx->Const.MaxLineWidthAA = 5.0;
693 ctx->Const.LineWidthGranularity = 0.5;
694 }
695
696 ctx->Const.MinPointSize = 1.0;
697 ctx->Const.MinPointSizeAA = 1.0;
698 ctx->Const.MaxPointSize = 255.0;
699 ctx->Const.MaxPointSizeAA = 3.0;
700 ctx->Const.PointSizeGranularity = 1.0;
701
702 /* Reinitialize the context point state.
703 * It depends on constants in __GLcontextRec::Const.
704 */
705 _mesa_init_point(ctx);
706
707 meta_init_metaops(ctx, &intel->meta);
708 ctx->Const.MaxColorAttachments = 4; /* XXX FBO: review this */
709 if (intel->gen >= 4) {
710 if (MAX_WIDTH > 8192)
711 ctx->Const.MaxRenderbufferSize = 8192;
712 } else {
713 if (MAX_WIDTH > 2048)
714 ctx->Const.MaxRenderbufferSize = 2048;
715 }
716
717 /* Initialize the software rasterizer and helper modules. */
718 _swrast_CreateContext(ctx);
719 _vbo_CreateContext(ctx);
720 _tnl_CreateContext(ctx);
721 _swsetup_CreateContext(ctx);
722
723 /* Configure swrast to match hardware characteristics: */
724 _swrast_allow_pixel_fog(ctx, GL_FALSE);
725 _swrast_allow_vertex_fog(ctx, GL_TRUE);
726
727 _mesa_meta_init(ctx);
728
729 intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
730 intel->hw_stipple = 1;
731
732 /* XXX FBO: this doesn't seem to be used anywhere */
733 switch (mesaVis->depthBits) {
734 case 0: /* what to do in this case? */
735 case 16:
736 intel->polygon_offset_scale = 1.0;
737 break;
738 case 24:
739 intel->polygon_offset_scale = 2.0; /* req'd to pass glean */
740 break;
741 default:
742 assert(0);
743 break;
744 }
745
746 if (intel->gen >= 4)
747 intel->polygon_offset_scale /= 0xffff;
748
749 intel->RenderIndex = ~0;
750
751 switch (ctx->API) {
752 case API_OPENGL:
753 intelInitExtensions(ctx);
754 break;
755 case API_OPENGLES:
756 break;
757 case API_OPENGLES2:
758 intelInitExtensionsES2(ctx);
759 break;
760 }
761
762 INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
763 if (INTEL_DEBUG & DEBUG_BUFMGR)
764 dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);
765
766 intel->batch = intel_batchbuffer_alloc(intel);
767
768 intel_fbo_init(intel);
769
770 if (intel->ctx.Mesa_DXTn) {
771 _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
772 _mesa_enable_extension(ctx, "GL_S3_s3tc");
773 }
774 else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
775 _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
776 }
777 intel->use_texture_tiling = driQueryOptionb(&intel->optionCache,
778 "texture_tiling");
779 intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");
780
781 intel->prim.primitive = ~0;
782
783 /* Force all software fallbacks */
784 if (driQueryOptionb(&intel->optionCache, "no_rast")) {
785 fprintf(stderr, "disabling 3D rasterization\n");
786 intel->no_rast = 1;
787 }
788
789 if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
790 fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
791 intel->always_flush_batch = 1;
792 }
793
794 if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
795 fprintf(stderr, "flushing GPU caches before/after each draw call\n");
796 intel->always_flush_cache = 1;
797 }
798
799 /* Disable all hardware rendering (skip emitting batches and fences/waits
800 * to the kernel)
801 */
802 intel->no_hw = getenv("INTEL_NO_HW") != NULL;
803
804 return GL_TRUE;
805 }
806
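/* Tear down a context: flush remaining rendering, destroy the hardware
 * backend and helper modules, release buffer objects and the option
 * cache, and finally free the Mesa context data.
 */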
807 void
808 intelDestroyContext(__DRIcontext * driContextPriv)
809 {
810 struct intel_context *intel =
811 (struct intel_context *) driContextPriv->driverPrivate;
812
813 assert(intel); /* should never be null */
814 if (intel) {
815 GLboolean release_texture_heaps;
816
817 INTEL_FIREVERTICES(intel);
818
819 _mesa_meta_free(&intel->ctx);
820
821 meta_destroy_metaops(&intel->meta);
822
823 intel->vtbl.destroy(intel);
824
825 release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
826 _swsetup_DestroyContext(&intel->ctx);
827 _tnl_DestroyContext(&intel->ctx);
828 _vbo_DestroyContext(&intel->ctx);
829
830 _swrast_DestroyContext(&intel->ctx);
831 intel->Fallback = 0x0; /* don't call _swrast_Flush later */
832
833 intel_batchbuffer_free(intel->batch);
834 intel->batch = NULL;
835
836 free(intel->prim.vb);
837 intel->prim.vb = NULL;
838 dri_bo_unreference(intel->prim.vb_bo);
839 intel->prim.vb_bo = NULL;
840 dri_bo_unreference(intel->first_post_swapbuffers_batch);
841 intel->first_post_swapbuffers_batch = NULL;
842
843 if (release_texture_heaps) {
844 /* Nothing is currently done here to free texture heaps;
845 * but we're not using the texture heap utilities, so I
846 * rather think we shouldn't. I've taken a look, and can't
847 * find any private texture data hanging around anywhere, but
848 * I'm not yet certain there isn't any at all...
849 */
850 /* if (INTEL_DEBUG & DEBUG_TEXTURE)
851 fprintf(stderr, "do something to free texture heaps\n");
852 */
853 }
854
855 driDestroyOptionCache(&intel->optionCache);
856
857 /* free the Mesa context */
858 _mesa_free_context_data(&intel->ctx);
859
860 FREE(intel);
861 driContextPriv->driverPrivate = NULL;
862 }
863 }
864
865 GLboolean
866 intelUnbindContext(__DRIcontext * driContextPriv)
867 {
868 return GL_TRUE;
869 }
870
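/* Bind the context to the given draw/read drawables.  Flushes the
 * previously current context (per the glXMakeCurrent() man page) and
 * resets the DRI2 stamps so the renderbuffers get revalidated.
 */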
871 GLboolean
872 intelMakeCurrent(__DRIcontext * driContextPriv,
873 __DRIdrawable * driDrawPriv,
874 __DRIdrawable * driReadPriv)
875 {
876 struct intel_context *intel;
877 GET_CURRENT_CONTEXT(curCtx);
878
879 if (driContextPriv)
880 intel = (struct intel_context *) driContextPriv->driverPrivate;
881 else
882 intel = NULL;
883
884 /* According to the glXMakeCurrent() man page: "Pending commands to
885 * the previous context, if any, are flushed before it is released."
886 * But only flush if we're actually changing contexts.
887 */
888 if (intel_context(curCtx) && intel_context(curCtx) != intel) {
889 _mesa_flush(curCtx);
890 }
891
892 if (driContextPriv) {
893 struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
894 struct gl_framebuffer *readFb = driReadPriv->driverPrivate;
895
896 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
897 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
898 intel_prepare_render(intel);
899 _mesa_make_current(&intel->ctx, fb, readFb);
900 }
901 else {
902 _mesa_make_current(NULL, NULL, NULL);
903 }
904
905 return GL_TRUE;
906 }