1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
29 #include "main/glheader.h"
30 #include "main/context.h"
31 #include "main/extensions.h"
32 #include "main/fbobject.h"
33 #include "main/framebuffer.h"
34 #include "main/imports.h"
35 #include "main/points.h"
37 #include "swrast/swrast.h"
38 #include "swrast_setup/swrast_setup.h"
40 #include "drivers/common/driverfuncs.h"
41 #include "drivers/common/meta.h"
43 #include "intel_chipset.h"
44 #include "intel_buffers.h"
45 #include "intel_tex.h"
46 #include "intel_batchbuffer.h"
47 #include "intel_clear.h"
48 #include "intel_extensions.h"
49 #include "intel_pixel.h"
50 #include "intel_regions.h"
51 #include "intel_buffer_objects.h"
52 #include "intel_fbo.h"
53 #include "intel_bufmgr.h"
54 #include "intel_screen.h"
56 #include "drirenderbuffer.h"
/* Bitmask of DEBUG_* flags, parsed from the INTEL_DEBUG environment
 * variable in intelInitContext() via driParseDebugString(). */
int INTEL_DEBUG = 0;
65 #define DRIVER_DATE "20100330 DEVELOPMENT"
66 #define DRIVER_DATE_GEM "GEM " DRIVER_DATE
69 static const GLubyte
*
70 intelGetString(struct gl_context
* ctx
, GLenum name
)
72 const struct intel_context
*const intel
= intel_context(ctx
);
74 static char buffer
[128];
78 return (GLubyte
*) "Tungsten Graphics, Inc";
82 switch (intel
->intelScreen
->deviceID
) {
84 chipset
= "Intel(R) 845G";
87 chipset
= "Intel(R) 830M";
89 case PCI_CHIP_I855_GM
:
90 chipset
= "Intel(R) 852GM/855GM";
93 chipset
= "Intel(R) 865G";
96 chipset
= "Intel(R) 915G";
98 case PCI_CHIP_E7221_G
:
99 chipset
= "Intel (R) E7221G (i915)";
101 case PCI_CHIP_I915_GM
:
102 chipset
= "Intel(R) 915GM";
104 case PCI_CHIP_I945_G
:
105 chipset
= "Intel(R) 945G";
107 case PCI_CHIP_I945_GM
:
108 chipset
= "Intel(R) 945GM";
110 case PCI_CHIP_I945_GME
:
111 chipset
= "Intel(R) 945GME";
114 chipset
= "Intel(R) G33";
117 chipset
= "Intel(R) Q35";
120 chipset
= "Intel(R) Q33";
122 case PCI_CHIP_IGD_GM
:
124 chipset
= "Intel(R) IGD";
126 case PCI_CHIP_I965_Q
:
127 chipset
= "Intel(R) 965Q";
129 case PCI_CHIP_I965_G
:
130 case PCI_CHIP_I965_G_1
:
131 chipset
= "Intel(R) 965G";
133 case PCI_CHIP_I946_GZ
:
134 chipset
= "Intel(R) 946GZ";
136 case PCI_CHIP_I965_GM
:
137 chipset
= "Intel(R) 965GM";
139 case PCI_CHIP_I965_GME
:
140 chipset
= "Intel(R) 965GME/GLE";
142 case PCI_CHIP_GM45_GM
:
143 chipset
= "Mobile IntelĀ® GM45 Express Chipset";
145 case PCI_CHIP_IGD_E_G
:
146 chipset
= "Intel(R) Integrated Graphics Device";
149 chipset
= "Intel(R) G45/G43";
152 chipset
= "Intel(R) Q45/Q43";
155 chipset
= "Intel(R) G41";
158 case PCI_CHIP_B43_G1
:
159 chipset
= "Intel(R) B43";
162 chipset
= "Intel(R) Ironlake Desktop";
165 chipset
= "Intel(R) Ironlake Mobile";
167 case PCI_CHIP_SANDYBRIDGE_GT1
:
168 case PCI_CHIP_SANDYBRIDGE_GT2
:
169 case PCI_CHIP_SANDYBRIDGE_GT2_PLUS
:
170 chipset
= "Intel(R) Sandybridge Desktop";
172 case PCI_CHIP_SANDYBRIDGE_M_GT1
:
173 case PCI_CHIP_SANDYBRIDGE_M_GT2
:
174 case PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS
:
175 chipset
= "Intel(R) Sandybridge Mobile";
177 case PCI_CHIP_SANDYBRIDGE_S
:
178 chipset
= "Intel(R) Sandybridge Server";
181 chipset
= "Unknown Intel Chipset";
185 (void) driGetRendererString(buffer
, chipset
, DRIVER_DATE_GEM
, 0);
186 return (GLubyte
*) buffer
;
194 intel_flush_front(struct gl_context
*ctx
)
196 struct intel_context
*intel
= intel_context(ctx
);
197 __DRIcontext
*driContext
= intel
->driContext
;
198 __DRIscreen
*const screen
= intel
->intelScreen
->driScrnPriv
;
200 if ((ctx
->DrawBuffer
->Name
== 0) && intel
->front_buffer_dirty
) {
201 if (screen
->dri2
.loader
&&
202 (screen
->dri2
.loader
->base
.version
>= 2)
203 && (screen
->dri2
.loader
->flushFrontBuffer
!= NULL
) &&
204 driContext
->driDrawablePriv
&&
205 driContext
->driDrawablePriv
->loaderPrivate
) {
206 (*screen
->dri2
.loader
->flushFrontBuffer
)(driContext
->driDrawablePriv
,
207 driContext
->driDrawablePriv
->loaderPrivate
);
209 /* We set the dirty bit in intel_prepare_render() if we're
210 * front buffer rendering once we get there.
212 intel
->front_buffer_dirty
= GL_FALSE
;
218 intel_bits_per_pixel(const struct intel_renderbuffer
*rb
)
220 return _mesa_get_format_bytes(rb
->Base
.Format
) * 8;
224 intel_update_renderbuffers(__DRIcontext
*context
, __DRIdrawable
*drawable
)
226 struct gl_framebuffer
*fb
= drawable
->driverPrivate
;
227 struct intel_renderbuffer
*rb
;
228 struct intel_region
*region
, *depth_region
;
229 struct intel_context
*intel
= context
->driverPrivate
;
230 struct intel_renderbuffer
*front_rb
, *back_rb
, *depth_rb
, *stencil_rb
;
231 __DRIbuffer
*buffers
= NULL
;
234 unsigned int attachments
[10];
235 const char *region_name
;
237 /* If we're rendering to the fake front buffer, make sure all the
238 * pending drawing has landed on the real front buffer. Otherwise
239 * when we eventually get to DRI2GetBuffersWithFormat the stale
240 * real front buffer contents will get copied to the new fake front
243 if (intel
->is_front_buffer_rendering
) {
244 intel_flush(&intel
->ctx
);
245 intel_flush_front(&intel
->ctx
);
248 /* Set this up front, so that in case our buffers get invalidated
249 * while we're getting new buffers, we don't clobber the stamp and
250 * thus ignore the invalidate. */
251 drawable
->lastStamp
= drawable
->dri2
.stamp
;
253 if (unlikely(INTEL_DEBUG
& DEBUG_DRI
))
254 fprintf(stderr
, "enter %s, drawable %p\n", __func__
, drawable
);
256 screen
= intel
->intelScreen
->driScrnPriv
;
258 if (screen
->dri2
.loader
259 && (screen
->dri2
.loader
->base
.version
> 2)
260 && (screen
->dri2
.loader
->getBuffersWithFormat
!= NULL
)) {
262 front_rb
= intel_get_renderbuffer(fb
, BUFFER_FRONT_LEFT
);
263 back_rb
= intel_get_renderbuffer(fb
, BUFFER_BACK_LEFT
);
264 depth_rb
= intel_get_renderbuffer(fb
, BUFFER_DEPTH
);
265 stencil_rb
= intel_get_renderbuffer(fb
, BUFFER_STENCIL
);
268 if ((intel
->is_front_buffer_rendering
||
269 intel
->is_front_buffer_reading
||
270 !back_rb
) && front_rb
) {
271 attachments
[i
++] = __DRI_BUFFER_FRONT_LEFT
;
272 attachments
[i
++] = intel_bits_per_pixel(front_rb
);
276 attachments
[i
++] = __DRI_BUFFER_BACK_LEFT
;
277 attachments
[i
++] = intel_bits_per_pixel(back_rb
);
280 if ((depth_rb
!= NULL
) && (stencil_rb
!= NULL
)) {
281 attachments
[i
++] = __DRI_BUFFER_DEPTH_STENCIL
;
282 attachments
[i
++] = intel_bits_per_pixel(depth_rb
);
283 } else if (depth_rb
!= NULL
) {
284 attachments
[i
++] = __DRI_BUFFER_DEPTH
;
285 attachments
[i
++] = intel_bits_per_pixel(depth_rb
);
286 } else if (stencil_rb
!= NULL
) {
287 attachments
[i
++] = __DRI_BUFFER_STENCIL
;
288 attachments
[i
++] = intel_bits_per_pixel(stencil_rb
);
292 (*screen
->dri2
.loader
->getBuffersWithFormat
)(drawable
,
297 drawable
->loaderPrivate
);
298 } else if (screen
->dri2
.loader
) {
300 if (intel_get_renderbuffer(fb
, BUFFER_FRONT_LEFT
))
301 attachments
[i
++] = __DRI_BUFFER_FRONT_LEFT
;
302 if (intel_get_renderbuffer(fb
, BUFFER_BACK_LEFT
))
303 attachments
[i
++] = __DRI_BUFFER_BACK_LEFT
;
304 if (intel_get_renderbuffer(fb
, BUFFER_DEPTH
))
305 attachments
[i
++] = __DRI_BUFFER_DEPTH
;
306 if (intel_get_renderbuffer(fb
, BUFFER_STENCIL
))
307 attachments
[i
++] = __DRI_BUFFER_STENCIL
;
309 buffers
= (*screen
->dri2
.loader
->getBuffers
)(drawable
,
314 drawable
->loaderPrivate
);
324 drawable
->numClipRects
= 1;
325 drawable
->pClipRects
[0].x1
= 0;
326 drawable
->pClipRects
[0].y1
= 0;
327 drawable
->pClipRects
[0].x2
= drawable
->w
;
328 drawable
->pClipRects
[0].y2
= drawable
->h
;
329 drawable
->numBackClipRects
= 1;
330 drawable
->pBackClipRects
[0].x1
= 0;
331 drawable
->pBackClipRects
[0].y1
= 0;
332 drawable
->pBackClipRects
[0].x2
= drawable
->w
;
333 drawable
->pBackClipRects
[0].y2
= drawable
->h
;
336 for (i
= 0; i
< count
; i
++) {
337 switch (buffers
[i
].attachment
) {
338 case __DRI_BUFFER_FRONT_LEFT
:
339 rb
= intel_get_renderbuffer(fb
, BUFFER_FRONT_LEFT
);
340 region_name
= "dri2 front buffer";
343 case __DRI_BUFFER_FAKE_FRONT_LEFT
:
344 rb
= intel_get_renderbuffer(fb
, BUFFER_FRONT_LEFT
);
345 region_name
= "dri2 fake front buffer";
348 case __DRI_BUFFER_BACK_LEFT
:
349 rb
= intel_get_renderbuffer(fb
, BUFFER_BACK_LEFT
);
350 region_name
= "dri2 back buffer";
353 case __DRI_BUFFER_DEPTH
:
354 rb
= intel_get_renderbuffer(fb
, BUFFER_DEPTH
);
355 region_name
= "dri2 depth buffer";
358 case __DRI_BUFFER_DEPTH_STENCIL
:
359 rb
= intel_get_renderbuffer(fb
, BUFFER_DEPTH
);
360 region_name
= "dri2 depth / stencil buffer";
363 case __DRI_BUFFER_STENCIL
:
364 rb
= intel_get_renderbuffer(fb
, BUFFER_STENCIL
);
365 region_name
= "dri2 stencil buffer";
368 case __DRI_BUFFER_ACCUM
:
371 "unhandled buffer attach event, attachment type %d\n",
372 buffers
[i
].attachment
);
379 if (rb
->region
&& rb
->region
->name
== buffers
[i
].name
)
382 if (unlikely(INTEL_DEBUG
& DEBUG_DRI
))
384 "attaching buffer %d, at %d, cpp %d, pitch %d\n",
385 buffers
[i
].name
, buffers
[i
].attachment
,
386 buffers
[i
].cpp
, buffers
[i
].pitch
);
388 if (buffers
[i
].attachment
== __DRI_BUFFER_STENCIL
&& depth_region
) {
389 if (unlikely(INTEL_DEBUG
& DEBUG_DRI
))
390 fprintf(stderr
, "(reusing depth buffer as stencil)\n");
391 intel_region_reference(®ion
, depth_region
);
394 region
= intel_region_alloc_for_handle(intel
->intelScreen
,
398 buffers
[i
].pitch
/ buffers
[i
].cpp
,
402 if (buffers
[i
].attachment
== __DRI_BUFFER_DEPTH
)
403 depth_region
= region
;
405 intel_renderbuffer_set_region(intel
, rb
, region
);
406 intel_region_release(®ion
);
408 if (buffers
[i
].attachment
== __DRI_BUFFER_DEPTH_STENCIL
) {
409 rb
= intel_get_renderbuffer(fb
, BUFFER_STENCIL
);
411 struct intel_region
*stencil_region
= NULL
;
413 if (rb
->region
&& rb
->region
->name
== buffers
[i
].name
)
416 intel_region_reference(&stencil_region
, region
);
417 intel_renderbuffer_set_region(intel
, rb
, stencil_region
);
418 intel_region_release(&stencil_region
);
423 driUpdateFramebufferSize(&intel
->ctx
, drawable
);
427 * intel_prepare_render should be called anywhere that curent read/drawbuffer
431 intel_prepare_render(struct intel_context
*intel
)
433 __DRIcontext
*driContext
= intel
->driContext
;
434 __DRIdrawable
*drawable
;
436 drawable
= driContext
->driDrawablePriv
;
437 if (drawable
&& drawable
->dri2
.stamp
!= driContext
->dri2
.draw_stamp
) {
438 if (drawable
->lastStamp
!= drawable
->dri2
.stamp
)
439 intel_update_renderbuffers(driContext
, drawable
);
440 intel_draw_buffer(&intel
->ctx
, intel
->ctx
.DrawBuffer
);
441 driContext
->dri2
.draw_stamp
= drawable
->dri2
.stamp
;
444 drawable
= driContext
->driReadablePriv
;
445 if (drawable
&& drawable
->dri2
.stamp
!= driContext
->dri2
.read_stamp
) {
446 if (drawable
->lastStamp
!= drawable
->dri2
.stamp
)
447 intel_update_renderbuffers(driContext
, drawable
);
448 driContext
->dri2
.read_stamp
= drawable
->dri2
.stamp
;
451 /* If we're currently rendering to the front buffer, the rendering
452 * that will happen next will probably dirty the front buffer. So
453 * mark it as dirty here.
455 if (intel
->is_front_buffer_rendering
)
456 intel
->front_buffer_dirty
= GL_TRUE
;
458 /* Wait for the swapbuffers before the one we just emitted, so we
459 * don't get too many swaps outstanding for apps that are GPU-heavy
462 * We're using intelDRI2Flush (called from the loader before
463 * swapbuffer) and glFlush (for front buffer rendering) as the
464 * indicator that a frame is done and then throttle when we get
465 * here as we prepare to render the next frame. At this point for
466 * round trips for swap/copy and getting new buffers are done and
467 * we'll spend less time waiting on the GPU.
469 * Unfortunately, we don't have a handle to the batch containing
470 * the swap, and getting our hands on that doesn't seem worth it,
471 * so we just us the first batch we emitted after the last swap.
473 if (intel
->need_throttle
) {
474 drmCommandNone(intel
->driFd
, DRM_I915_GEM_THROTTLE
);
475 intel
->need_throttle
= GL_FALSE
;
480 intel_viewport(struct gl_context
*ctx
, GLint x
, GLint y
, GLsizei w
, GLsizei h
)
482 struct intel_context
*intel
= intel_context(ctx
);
483 __DRIcontext
*driContext
= intel
->driContext
;
485 if (intel
->saved_viewport
)
486 intel
->saved_viewport(ctx
, x
, y
, w
, h
);
488 if (ctx
->DrawBuffer
->Name
== 0) {
489 dri2InvalidateDrawable(driContext
->driDrawablePriv
);
490 dri2InvalidateDrawable(driContext
->driReadablePriv
);
494 static const struct dri_debug_control debug_control
[] = {
495 { "tex", DEBUG_TEXTURE
},
496 { "state", DEBUG_STATE
},
497 { "ioctl", DEBUG_IOCTL
},
498 { "blit", DEBUG_BLIT
},
499 { "mip", DEBUG_MIPTREE
},
500 { "fall", DEBUG_FALLBACKS
},
501 { "verb", DEBUG_VERBOSE
},
502 { "bat", DEBUG_BATCH
},
503 { "pix", DEBUG_PIXEL
},
504 { "buf", DEBUG_BUFMGR
},
505 { "reg", DEBUG_REGION
},
508 { "sync", DEBUG_SYNC
},
509 { "prim", DEBUG_PRIMS
},
510 { "vert", DEBUG_VERTS
},
511 { "dri", DEBUG_DRI
},
513 { "san", DEBUG_SANITY
},
514 { "sleep", DEBUG_SLEEP
},
515 { "stats", DEBUG_STATS
},
516 { "tile", DEBUG_TILE
},
517 { "sing", DEBUG_SINGLE_THREAD
},
518 { "thre", DEBUG_SINGLE_THREAD
},
520 { "urb", DEBUG_URB
},
522 { "clip", DEBUG_CLIP
},
528 intelInvalidateState(struct gl_context
* ctx
, GLuint new_state
)
530 struct intel_context
*intel
= intel_context(ctx
);
532 _swrast_InvalidateState(ctx
, new_state
);
533 _swsetup_InvalidateState(ctx
, new_state
);
534 _vbo_InvalidateState(ctx
, new_state
);
535 _tnl_InvalidateState(ctx
, new_state
);
536 _tnl_invalidate_vertex_state(ctx
, new_state
);
538 intel
->NewGLState
|= new_state
;
540 if (intel
->vtbl
.invalidate_state
)
541 intel
->vtbl
.invalidate_state( intel
, new_state
);
545 intel_flush(struct gl_context
*ctx
)
547 struct intel_context
*intel
= intel_context(ctx
);
553 INTEL_FIREVERTICES(intel
);
555 if (intel
->batch
.used
)
556 intel_batchbuffer_flush(intel
);
560 intel_glFlush(struct gl_context
*ctx
)
562 struct intel_context
*intel
= intel_context(ctx
);
565 intel_flush_front(ctx
);
566 if (intel
->is_front_buffer_rendering
)
567 intel
->need_throttle
= GL_TRUE
;
571 intelFinish(struct gl_context
* ctx
)
573 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
577 intel_flush_front(ctx
);
579 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
580 struct intel_renderbuffer
*irb
;
582 irb
= intel_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
584 if (irb
&& irb
->region
&& irb
->region
->buffer
)
585 drm_intel_bo_wait_rendering(irb
->region
->buffer
);
587 if (fb
->_DepthBuffer
) {
588 /* XXX: Wait on buffer idle */
593 intelInitDriverFunctions(struct dd_function_table
*functions
)
595 _mesa_init_driver_functions(functions
);
597 functions
->Flush
= intel_glFlush
;
598 functions
->Finish
= intelFinish
;
599 functions
->GetString
= intelGetString
;
600 functions
->UpdateState
= intelInvalidateState
;
602 intelInitTextureFuncs(functions
);
603 intelInitTextureImageFuncs(functions
);
604 intelInitTextureSubImageFuncs(functions
);
605 intelInitTextureCopyImageFuncs(functions
);
606 intelInitStateFuncs(functions
);
607 intelInitClearFuncs(functions
);
608 intelInitBufferFuncs(functions
);
609 intelInitPixelFuncs(functions
);
610 intelInitBufferObjectFuncs(functions
);
611 intel_init_syncobj_functions(functions
);
616 intelInitContext(struct intel_context
*intel
,
618 const struct gl_config
* mesaVis
,
619 __DRIcontext
* driContextPriv
,
620 void *sharedContextPrivate
,
621 struct dd_function_table
*functions
)
623 struct gl_context
*ctx
= &intel
->ctx
;
624 struct gl_context
*shareCtx
= (struct gl_context
*) sharedContextPrivate
;
625 __DRIscreen
*sPriv
= driContextPriv
->driScreenPriv
;
626 struct intel_screen
*intelScreen
= sPriv
->private;
628 struct gl_config visual
;
630 /* we can't do anything without a connection to the device */
631 if (intelScreen
->bufmgr
== NULL
)
634 /* Can't rely on invalidate events, fall back to glViewport hack */
635 if (!driContextPriv
->driScreenPriv
->dri2
.useInvalidate
) {
636 intel
->saved_viewport
= functions
->Viewport
;
637 functions
->Viewport
= intel_viewport
;
640 if (mesaVis
== NULL
) {
641 memset(&visual
, 0, sizeof visual
);
645 if (!_mesa_initialize_context(&intel
->ctx
, api
, mesaVis
, shareCtx
,
646 functions
, (void *) intel
)) {
647 printf("%s: failed to init mesa context\n", __FUNCTION__
);
651 driContextPriv
->driverPrivate
= intel
;
652 intel
->intelScreen
= intelScreen
;
653 intel
->driContext
= driContextPriv
;
654 intel
->driFd
= sPriv
->fd
;
656 intel
->has_xrgb_textures
= GL_TRUE
;
657 if (IS_GEN6(intel
->intelScreen
->deviceID
)) {
659 intel
->needs_ff_sync
= GL_TRUE
;
660 intel
->has_luminance_srgb
= GL_TRUE
;
661 } else if (IS_GEN5(intel
->intelScreen
->deviceID
)) {
663 intel
->needs_ff_sync
= GL_TRUE
;
664 intel
->has_luminance_srgb
= GL_TRUE
;
665 } else if (IS_965(intel
->intelScreen
->deviceID
)) {
667 if (IS_G4X(intel
->intelScreen
->deviceID
)) {
668 intel
->has_luminance_srgb
= GL_TRUE
;
669 intel
->is_g4x
= GL_TRUE
;
671 } else if (IS_9XX(intel
->intelScreen
->deviceID
)) {
673 if (IS_945(intel
->intelScreen
->deviceID
)) {
674 intel
->is_945
= GL_TRUE
;
678 if (intel
->intelScreen
->deviceID
== PCI_CHIP_I830_M
||
679 intel
->intelScreen
->deviceID
== PCI_CHIP_845_G
) {
680 intel
->has_xrgb_textures
= GL_FALSE
;
684 memset(&ctx
->TextureFormatSupported
, 0,
685 sizeof(ctx
->TextureFormatSupported
));
686 ctx
->TextureFormatSupported
[MESA_FORMAT_ARGB8888
] = GL_TRUE
;
687 if (intel
->has_xrgb_textures
)
688 ctx
->TextureFormatSupported
[MESA_FORMAT_XRGB8888
] = GL_TRUE
;
689 ctx
->TextureFormatSupported
[MESA_FORMAT_ARGB4444
] = GL_TRUE
;
690 ctx
->TextureFormatSupported
[MESA_FORMAT_ARGB1555
] = GL_TRUE
;
691 ctx
->TextureFormatSupported
[MESA_FORMAT_RGB565
] = GL_TRUE
;
692 ctx
->TextureFormatSupported
[MESA_FORMAT_L8
] = GL_TRUE
;
693 ctx
->TextureFormatSupported
[MESA_FORMAT_A8
] = GL_TRUE
;
694 ctx
->TextureFormatSupported
[MESA_FORMAT_I8
] = GL_TRUE
;
695 ctx
->TextureFormatSupported
[MESA_FORMAT_AL88
] = GL_TRUE
;
697 ctx
->TextureFormatSupported
[MESA_FORMAT_AL1616
] = GL_TRUE
;
698 ctx
->TextureFormatSupported
[MESA_FORMAT_S8_Z24
] = GL_TRUE
;
700 * This was disabled in initial FBO enabling to avoid combinations
701 * of depth+stencil that wouldn't work together. We since decided
702 * that it was OK, since it's up to the app to come up with the
703 * combo that actually works, so this can probably be re-enabled.
706 ctx->TextureFormatSupported[MESA_FORMAT_Z16] = GL_TRUE;
707 ctx->TextureFormatSupported[MESA_FORMAT_Z24] = GL_TRUE;
710 /* ctx->Extensions.MESA_ycbcr_texture */
711 ctx
->TextureFormatSupported
[MESA_FORMAT_YCBCR
] = GL_TRUE
;
712 ctx
->TextureFormatSupported
[MESA_FORMAT_YCBCR_REV
] = GL_TRUE
;
714 /* GL_3DFX_texture_compression_FXT1 */
715 ctx
->TextureFormatSupported
[MESA_FORMAT_RGB_FXT1
] = GL_TRUE
;
716 ctx
->TextureFormatSupported
[MESA_FORMAT_RGBA_FXT1
] = GL_TRUE
;
718 /* GL_EXT_texture_compression_s3tc */
719 ctx
->TextureFormatSupported
[MESA_FORMAT_RGB_DXT1
] = GL_TRUE
;
720 ctx
->TextureFormatSupported
[MESA_FORMAT_RGBA_DXT1
] = GL_TRUE
;
721 ctx
->TextureFormatSupported
[MESA_FORMAT_RGBA_DXT3
] = GL_TRUE
;
722 ctx
->TextureFormatSupported
[MESA_FORMAT_RGBA_DXT5
] = GL_TRUE
;
725 /* GL_ARB_texture_rg */
726 ctx
->TextureFormatSupported
[MESA_FORMAT_R8
] = GL_TRUE
;
727 ctx
->TextureFormatSupported
[MESA_FORMAT_R16
] = GL_TRUE
;
728 ctx
->TextureFormatSupported
[MESA_FORMAT_RG88
] = GL_TRUE
;
729 ctx
->TextureFormatSupported
[MESA_FORMAT_RG1616
] = GL_TRUE
;
731 ctx
->TextureFormatSupported
[MESA_FORMAT_DUDV8
] = GL_TRUE
;
732 ctx
->TextureFormatSupported
[MESA_FORMAT_SIGNED_RGBA8888_REV
] = GL_TRUE
;
734 /* GL_EXT_texture_sRGB */
735 ctx
->TextureFormatSupported
[MESA_FORMAT_SARGB8
] = GL_TRUE
;
736 if (intel
->gen
>= 5 || intel
->is_g4x
)
737 ctx
->TextureFormatSupported
[MESA_FORMAT_SRGB_DXT1
] = GL_TRUE
;
738 ctx
->TextureFormatSupported
[MESA_FORMAT_SRGBA_DXT1
] = GL_TRUE
;
739 ctx
->TextureFormatSupported
[MESA_FORMAT_SRGBA_DXT3
] = GL_TRUE
;
740 ctx
->TextureFormatSupported
[MESA_FORMAT_SRGBA_DXT5
] = GL_TRUE
;
741 if (intel
->has_luminance_srgb
) {
742 ctx
->TextureFormatSupported
[MESA_FORMAT_SL8
] = GL_TRUE
;
743 ctx
->TextureFormatSupported
[MESA_FORMAT_SLA8
] = GL_TRUE
;
747 driParseConfigFiles(&intel
->optionCache
, &intelScreen
->optionCache
,
748 sPriv
->myNum
, (intel
->gen
>= 4) ? "i965" : "i915");
750 intel
->maxBatchSize
= 4096;
752 intel
->maxBatchSize
= sizeof(intel
->batch
.map
);
754 intel
->bufmgr
= intelScreen
->bufmgr
;
756 bo_reuse_mode
= driQueryOptioni(&intel
->optionCache
, "bo_reuse");
757 switch (bo_reuse_mode
) {
758 case DRI_CONF_BO_REUSE_DISABLED
:
760 case DRI_CONF_BO_REUSE_ALL
:
761 intel_bufmgr_gem_enable_reuse(intel
->bufmgr
);
765 /* This doesn't yet catch all non-conformant rendering, but it's a
768 if (getenv("INTEL_STRICT_CONFORMANCE")) {
769 unsigned int value
= atoi(getenv("INTEL_STRICT_CONFORMANCE"));
771 intel
->conformance_mode
= value
;
774 intel
->conformance_mode
= 1;
778 if (intel
->conformance_mode
> 0) {
779 ctx
->Const
.MinLineWidth
= 1.0;
780 ctx
->Const
.MinLineWidthAA
= 1.0;
781 ctx
->Const
.MaxLineWidth
= 1.0;
782 ctx
->Const
.MaxLineWidthAA
= 1.0;
783 ctx
->Const
.LineWidthGranularity
= 1.0;
786 ctx
->Const
.MinLineWidth
= 1.0;
787 ctx
->Const
.MinLineWidthAA
= 1.0;
788 ctx
->Const
.MaxLineWidth
= 5.0;
789 ctx
->Const
.MaxLineWidthAA
= 5.0;
790 ctx
->Const
.LineWidthGranularity
= 0.5;
793 ctx
->Const
.MinPointSize
= 1.0;
794 ctx
->Const
.MinPointSizeAA
= 1.0;
795 ctx
->Const
.MaxPointSize
= 255.0;
796 ctx
->Const
.MaxPointSizeAA
= 3.0;
797 ctx
->Const
.PointSizeGranularity
= 1.0;
799 ctx
->Const
.MaxSamples
= 1.0;
801 /* reinitialize the context point state.
802 * It depend on constants in __struct gl_contextRec::Const
804 _mesa_init_point(ctx
);
806 if (intel
->gen
>= 4) {
807 ctx
->Const
.sRGBCapable
= GL_TRUE
;
808 if (MAX_WIDTH
> 8192)
809 ctx
->Const
.MaxRenderbufferSize
= 8192;
811 if (MAX_WIDTH
> 2048)
812 ctx
->Const
.MaxRenderbufferSize
= 2048;
815 /* Initialize the software rasterizer and helper modules. */
816 _swrast_CreateContext(ctx
);
817 _vbo_CreateContext(ctx
);
818 _tnl_CreateContext(ctx
);
819 _swsetup_CreateContext(ctx
);
821 /* Configure swrast to match hardware characteristics: */
822 _swrast_allow_pixel_fog(ctx
, GL_FALSE
);
823 _swrast_allow_vertex_fog(ctx
, GL_TRUE
);
825 _mesa_meta_init(ctx
);
827 intel
->hw_stencil
= mesaVis
->stencilBits
&& mesaVis
->depthBits
== 24;
828 intel
->hw_stipple
= 1;
830 /* XXX FBO: this doesn't seem to be used anywhere */
831 switch (mesaVis
->depthBits
) {
832 case 0: /* what to do in this case? */
834 intel
->polygon_offset_scale
= 1.0;
837 intel
->polygon_offset_scale
= 2.0; /* req'd to pass glean */
845 intel
->polygon_offset_scale
/= 0xffff;
847 intel
->RenderIndex
= ~0;
851 intelInitExtensions(ctx
);
856 intelInitExtensionsES2(ctx
);
860 INTEL_DEBUG
= driParseDebugString(getenv("INTEL_DEBUG"), debug_control
);
861 if (INTEL_DEBUG
& DEBUG_BUFMGR
)
862 dri_bufmgr_set_debug(intel
->bufmgr
, GL_TRUE
);
864 intel_batchbuffer_reset(intel
);
866 intel_fbo_init(intel
);
868 if (intel
->ctx
.Mesa_DXTn
) {
869 _mesa_enable_extension(ctx
, "GL_EXT_texture_compression_s3tc");
870 _mesa_enable_extension(ctx
, "GL_S3_s3tc");
872 else if (driQueryOptionb(&intel
->optionCache
, "force_s3tc_enable")) {
873 _mesa_enable_extension(ctx
, "GL_EXT_texture_compression_s3tc");
875 intel
->use_texture_tiling
= driQueryOptionb(&intel
->optionCache
,
877 intel
->use_early_z
= driQueryOptionb(&intel
->optionCache
, "early_z");
879 intel
->prim
.primitive
= ~0;
881 /* Force all software fallbacks */
882 if (driQueryOptionb(&intel
->optionCache
, "no_rast")) {
883 fprintf(stderr
, "disabling 3D rasterization\n");
887 if (driQueryOptionb(&intel
->optionCache
, "always_flush_batch")) {
888 fprintf(stderr
, "flushing batchbuffer before/after each draw call\n");
889 intel
->always_flush_batch
= 1;
892 if (driQueryOptionb(&intel
->optionCache
, "always_flush_cache")) {
893 fprintf(stderr
, "flushing GPU caches before/after each draw call\n");
894 intel
->always_flush_cache
= 1;
901 intelDestroyContext(__DRIcontext
* driContextPriv
)
903 struct intel_context
*intel
=
904 (struct intel_context
*) driContextPriv
->driverPrivate
;
906 assert(intel
); /* should never be null */
908 INTEL_FIREVERTICES(intel
);
910 _mesa_meta_free(&intel
->ctx
);
912 intel
->vtbl
.destroy(intel
);
914 _swsetup_DestroyContext(&intel
->ctx
);
915 _tnl_DestroyContext(&intel
->ctx
);
916 _vbo_DestroyContext(&intel
->ctx
);
918 _swrast_DestroyContext(&intel
->ctx
);
919 intel
->Fallback
= 0x0; /* don't call _swrast_Flush later */
921 intel_batchbuffer_free(intel
);
923 free(intel
->prim
.vb
);
924 intel
->prim
.vb
= NULL
;
925 drm_intel_bo_unreference(intel
->prim
.vb_bo
);
926 intel
->prim
.vb_bo
= NULL
;
928 driDestroyOptionCache(&intel
->optionCache
);
930 /* free the Mesa context */
931 _mesa_free_context_data(&intel
->ctx
);
934 driContextPriv
->driverPrivate
= NULL
;
939 intelUnbindContext(__DRIcontext
* driContextPriv
)
941 /* Unset current context and dispath table */
942 _mesa_make_current(NULL
, NULL
, NULL
);
948 intelMakeCurrent(__DRIcontext
* driContextPriv
,
949 __DRIdrawable
* driDrawPriv
,
950 __DRIdrawable
* driReadPriv
)
952 struct intel_context
*intel
;
953 GET_CURRENT_CONTEXT(curCtx
);
956 intel
= (struct intel_context
*) driContextPriv
->driverPrivate
;
960 /* According to the glXMakeCurrent() man page: "Pending commands to
961 * the previous context, if any, are flushed before it is released."
962 * But only flush if we're actually changing contexts.
964 if (intel_context(curCtx
) && intel_context(curCtx
) != intel
) {
968 if (driContextPriv
) {
969 struct gl_framebuffer
*fb
, *readFb
;
971 if (driDrawPriv
== NULL
&& driReadPriv
== NULL
) {
972 fb
= _mesa_get_incomplete_framebuffer();
973 readFb
= _mesa_get_incomplete_framebuffer();
975 fb
= driDrawPriv
->driverPrivate
;
976 readFb
= driReadPriv
->driverPrivate
;
977 driContextPriv
->dri2
.draw_stamp
= driDrawPriv
->dri2
.stamp
- 1;
978 driContextPriv
->dri2
.read_stamp
= driReadPriv
->dri2
.stamp
- 1;
981 intel_prepare_render(intel
);
982 _mesa_make_current(&intel
->ctx
, fb
, readFb
);
984 /* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer
985 * is NULL at that point. We can't call _mesa_makecurrent()
986 * first, since we need the buffer size for the initial
987 * viewport. So just call intel_draw_buffer() again here. */
988 intel_draw_buffer(&intel
->ctx
, intel
->ctx
.DrawBuffer
);
991 _mesa_make_current(NULL
, NULL
, NULL
);