1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
42 #include "tnl/t_pipeline.h"
43 #include "tnl/t_vertex.h"
45 #include "drivers/common/driverfuncs.h"
47 #include "intel_screen.h"
51 #include "intel_chipset.h"
52 #include "intel_buffers.h"
53 #include "intel_tex.h"
54 #include "intel_ioctl.h"
55 #include "intel_batchbuffer.h"
56 #include "intel_blit.h"
57 #include "intel_pixel.h"
58 #include "intel_regions.h"
59 #include "intel_buffer_objects.h"
60 #include "intel_fbo.h"
61 #include "intel_decode.h"
62 #include "intel_bufmgr.h"
64 #include "drirenderbuffer.h"
67 #include "xmlpool.h" /* for symbolic values of enum-type options */
/* Global driver debug flag mask; bits are set from the INTEL_DEBUG
 * environment variable at context creation (see debug_control[]).
 */
int INTEL_DEBUG = 0;
72 #define need_GL_NV_point_sprite
73 #define need_GL_ARB_multisample
74 #define need_GL_ARB_point_parameters
75 #define need_GL_ARB_texture_compression
76 #define need_GL_ARB_vertex_buffer_object
77 #define need_GL_ARB_vertex_program
78 #define need_GL_ARB_window_pos
79 #define need_GL_ARB_occlusion_query
80 #define need_GL_EXT_blend_color
81 #define need_GL_EXT_blend_equation_separate
82 #define need_GL_EXT_blend_func_separate
83 #define need_GL_EXT_blend_minmax
84 #define need_GL_EXT_cull_vertex
85 #define need_GL_EXT_fog_coord
86 #define need_GL_EXT_framebuffer_object
87 #define need_GL_EXT_multi_draw_arrays
88 #define need_GL_EXT_secondary_color
89 #define need_GL_NV_vertex_program
90 #define need_GL_ATI_separate_stencil
91 #define need_GL_EXT_point_parameters
92 #define need_GL_VERSION_2_0
93 #define need_GL_VERSION_2_1
94 #define need_GL_ARB_shader_objects
95 #define need_GL_ARB_vertex_shader
97 #include "extension_helper.h"
99 #define DRIVER_DATE "20080716"
100 #define DRIVER_DATE_GEM "GEM " DRIVER_DATE
102 static const GLubyte
*
103 intelGetString(GLcontext
* ctx
, GLenum name
)
105 const struct intel_context
*const intel
= intel_context(ctx
);
107 static char buffer
[128];
111 return (GLubyte
*) "Tungsten Graphics, Inc";
115 switch (intel
->intelScreen
->deviceID
) {
117 chipset
= "Intel(R) 845G";
119 case PCI_CHIP_I830_M
:
120 chipset
= "Intel(R) 830M";
122 case PCI_CHIP_I855_GM
:
123 chipset
= "Intel(R) 852GM/855GM";
125 case PCI_CHIP_I865_G
:
126 chipset
= "Intel(R) 865G";
128 case PCI_CHIP_I915_G
:
129 chipset
= "Intel(R) 915G";
131 case PCI_CHIP_E7221_G
:
132 chipset
= "Intel (R) E7221G (i915)";
134 case PCI_CHIP_I915_GM
:
135 chipset
= "Intel(R) 915GM";
137 case PCI_CHIP_I945_G
:
138 chipset
= "Intel(R) 945G";
140 case PCI_CHIP_I945_GM
:
141 chipset
= "Intel(R) 945GM";
143 case PCI_CHIP_I945_GME
:
144 chipset
= "Intel(R) 945GME";
147 chipset
= "Intel(R) G33";
150 chipset
= "Intel(R) Q35";
153 chipset
= "Intel(R) Q33";
155 case PCI_CHIP_I965_Q
:
156 chipset
= "Intel(R) 965Q";
158 case PCI_CHIP_I965_G
:
159 case PCI_CHIP_I965_G_1
:
160 chipset
= "Intel(R) 965G";
162 case PCI_CHIP_I946_GZ
:
163 chipset
= "Intel(R) 946GZ";
165 case PCI_CHIP_I965_GM
:
166 chipset
= "Intel(R) 965GM";
168 case PCI_CHIP_I965_GME
:
169 chipset
= "Intel(R) 965GME/GLE";
171 case PCI_CHIP_GM45_GM
:
172 chipset
= "Mobile IntelĀ® GM45 Express Chipset";
174 case PCI_CHIP_IGD_E_G
:
175 chipset
= "Intel(R) Integrated Graphics Device";
178 chipset
= "Intel(R) G45/G43";
181 chipset
= "Intel(R) Q45/Q43";
184 chipset
= "Unknown Intel Chipset";
188 (void) driGetRendererString(buffer
, chipset
,
189 (intel
->ttm
) ? DRIVER_DATE_GEM
: DRIVER_DATE
,
191 return (GLubyte
*) buffer
;
199 * Extension strings exported by the intel driver.
201 * Extensions supported by all chips supported by i830_dri, i915_dri, or
204 static const struct dri_extension card_extensions
[] = {
205 {"GL_ARB_multisample", GL_ARB_multisample_functions
},
206 {"GL_ARB_multitexture", NULL
},
207 {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions
},
208 {"GL_NV_point_sprite", GL_NV_point_sprite_functions
},
209 {"GL_ARB_texture_border_clamp", NULL
},
210 {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions
},
211 {"GL_ARB_texture_cube_map", NULL
},
212 {"GL_ARB_texture_env_add", NULL
},
213 {"GL_ARB_texture_env_combine", NULL
},
214 {"GL_ARB_texture_env_crossbar", NULL
},
215 {"GL_ARB_texture_env_dot3", NULL
},
216 {"GL_ARB_texture_mirrored_repeat", NULL
},
217 {"GL_ARB_texture_non_power_of_two", NULL
},
218 {"GL_ARB_texture_rectangle", NULL
},
219 {"GL_NV_texture_rectangle", NULL
},
220 {"GL_EXT_texture_rectangle", NULL
},
221 {"GL_ARB_point_parameters", NULL
},
222 {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions
},
223 {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions
},
224 {"GL_ARB_window_pos", GL_ARB_window_pos_functions
},
225 {"GL_EXT_blend_color", GL_EXT_blend_color_functions
},
226 {"GL_EXT_blend_equation_separate",
227 GL_EXT_blend_equation_separate_functions
},
228 {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions
},
229 {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions
},
230 {"GL_EXT_blend_logic_op", NULL
},
231 {"GL_EXT_blend_subtract", NULL
},
232 {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions
},
233 {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions
},
234 {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions
},
235 {"GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions
},
236 #if 1 /* XXX FBO temporary? */
237 {"GL_EXT_packed_depth_stencil", NULL
},
239 {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions
},
240 {"GL_EXT_stencil_wrap", NULL
},
241 {"GL_EXT_texture_edge_clamp", NULL
},
242 {"GL_EXT_texture_env_combine", NULL
},
243 {"GL_EXT_texture_env_dot3", NULL
},
244 {"GL_EXT_texture_filter_anisotropic", NULL
},
245 {"GL_EXT_texture_lod_bias", NULL
},
246 {"GL_3DFX_texture_compression_FXT1", NULL
},
247 {"GL_APPLE_client_storage", NULL
},
248 {"GL_MESA_pack_invert", NULL
},
249 {"GL_MESA_ycbcr_texture", NULL
},
250 {"GL_NV_blend_square", NULL
},
251 {"GL_NV_vertex_program", GL_NV_vertex_program_functions
},
252 {"GL_NV_vertex_program1_1", NULL
},
253 { "GL_SGIS_generate_mipmap", NULL
},
257 static const struct dri_extension brw_extensions
[] = {
258 { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions
},
259 { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions
},
260 { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions
},
261 { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions
},
262 { "GL_ARB_point_sprite", NULL
},
263 { "GL_ARB_fragment_shader", NULL
},
264 { "GL_ARB_draw_buffers", NULL
},
265 { "GL_ARB_depth_texture", NULL
},
266 { "GL_ARB_fragment_program", NULL
},
267 { "GL_ARB_shadow", NULL
},
268 { "GL_EXT_shadow_funcs", NULL
},
269 { "GL_ARB_fragment_program_shadow", NULL
},
270 /* ARB extn won't work if not enabled */
271 { "GL_SGIX_depth_texture", NULL
},
272 { "GL_EXT_texture_sRGB", NULL
},
276 static const struct dri_extension arb_oc_extensions
[] = {
277 {"GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions
},
281 static const struct dri_extension ttm_extensions
[] = {
282 {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions
},
283 {"GL_ARB_pixel_buffer_object", NULL
},
288 * Initializes potential list of extensions if ctx == NULL, or actually enables
289 * extensions for a context.
291 void intelInitExtensions(GLcontext
*ctx
, GLboolean enable_imaging
)
293 struct intel_context
*intel
= ctx
?intel_context(ctx
):NULL
;
295 /* Disable imaging extension until convolution is working in teximage paths.
297 enable_imaging
= GL_FALSE
;
299 driInitExtensions(ctx
, card_extensions
, enable_imaging
);
301 if (intel
== NULL
|| intel
->ttm
)
302 driInitExtensions(ctx
, ttm_extensions
, GL_FALSE
);
305 (IS_965(intel
->intelScreen
->deviceID
) &&
306 intel
->intelScreen
->drmMinor
>= 8))
307 driInitExtensions(ctx
, arb_oc_extensions
, GL_FALSE
);
309 if (intel
== NULL
|| IS_965(intel
->intelScreen
->deviceID
))
310 driInitExtensions(ctx
, brw_extensions
, GL_FALSE
);
313 static const struct dri_debug_control debug_control
[] = {
314 { "tex", DEBUG_TEXTURE
},
315 { "state", DEBUG_STATE
},
316 { "ioctl", DEBUG_IOCTL
},
317 { "blit", DEBUG_BLIT
},
318 { "mip", DEBUG_MIPTREE
},
319 { "fall", DEBUG_FALLBACKS
},
320 { "verb", DEBUG_VERBOSE
},
321 { "bat", DEBUG_BATCH
},
322 { "pix", DEBUG_PIXEL
},
323 { "buf", DEBUG_BUFMGR
},
324 { "reg", DEBUG_REGION
},
326 { "lock", DEBUG_LOCK
},
327 { "sync", DEBUG_SYNC
},
328 { "prim", DEBUG_PRIMS
},
329 { "vert", DEBUG_VERTS
},
330 { "dri", DEBUG_DRI
},
331 { "dma", DEBUG_DMA
},
332 { "san", DEBUG_SANITY
},
333 { "sleep", DEBUG_SLEEP
},
334 { "stats", DEBUG_STATS
},
335 { "tile", DEBUG_TILE
},
336 { "sing", DEBUG_SINGLE_THREAD
},
337 { "thre", DEBUG_SINGLE_THREAD
},
339 { "urb", DEBUG_URB
},
346 intelInvalidateState(GLcontext
* ctx
, GLuint new_state
)
348 struct intel_context
*intel
= intel_context(ctx
);
350 _swrast_InvalidateState(ctx
, new_state
);
351 _swsetup_InvalidateState(ctx
, new_state
);
352 _vbo_InvalidateState(ctx
, new_state
);
353 _tnl_InvalidateState(ctx
, new_state
);
354 _tnl_invalidate_vertex_state(ctx
, new_state
);
356 intel
->NewGLState
|= new_state
;
358 if (intel
->vtbl
.invalidate_state
)
359 intel
->vtbl
.invalidate_state( intel
, new_state
);
364 intelFlush(GLcontext
* ctx
)
366 struct intel_context
*intel
= intel_context(ctx
);
371 if (!IS_965(intel
->intelScreen
->deviceID
))
372 INTEL_FIREVERTICES(intel
);
374 /* Emit a flush so that any frontbuffer rendering that might have occurred
375 * lands onscreen in a timely manner, even if the X Server doesn't trigger
378 intel_batchbuffer_emit_mi_flush(intel
->batch
);
380 if (intel
->batch
->map
!= intel
->batch
->ptr
)
381 intel_batchbuffer_flush(intel
->batch
);
385 intelFinish(GLcontext
* ctx
)
387 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
392 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
393 struct intel_renderbuffer
*irb
;
395 irb
= intel_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
398 dri_bo_wait_rendering(irb
->region
->buffer
);
400 if (fb
->_DepthBuffer
) {
401 /* XXX: Wait on buffer idle */
406 intelBeginQuery(GLcontext
*ctx
, GLenum target
, struct gl_query_object
*q
)
408 struct intel_context
*intel
= intel_context( ctx
);
409 struct drm_i915_mmio io
= {
410 .read_write
= I915_MMIO_READ
,
411 .reg
= MMIO_REGS_PS_DEPTH_COUNT
,
415 intelFinish(&intel
->ctx
);
416 drmCommandWrite(intel
->driFd
, DRM_I915_MMIO
, &io
, sizeof(io
));
420 intelEndQuery(GLcontext
*ctx
, GLenum target
, struct gl_query_object
*q
)
422 struct intel_context
*intel
= intel_context( ctx
);
424 struct drm_i915_mmio io
= {
425 .read_write
= I915_MMIO_READ
,
426 .reg
= MMIO_REGS_PS_DEPTH_COUNT
,
429 intelFinish(&intel
->ctx
);
430 drmCommandWrite(intel
->driFd
, DRM_I915_MMIO
, &io
, sizeof(io
));
431 q
->Result
= tmp
- q
->Result
;
/** Driver-specific fence emit implementation for the fake memory manager. */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *) private;
   unsigned int fence;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */

   fence = intelEmitIrqLocked(intel);

   return fence;
}
/** Driver-specific fence wait implementation for the fake memory manager. */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   struct intel_context *intel = (struct intel_context *) private;

   intelWaitIrq(intel, cookie);

   return 0;
}
464 intel_init_bufmgr(struct intel_context
*intel
)
466 intelScreenPrivate
*intelScreen
= intel
->intelScreen
;
467 GLboolean gem_disable
= getenv("INTEL_NO_GEM") != NULL
;
469 GLboolean gem_supported
;
470 struct drm_i915_getparam gp
;
472 gp
.param
= I915_PARAM_HAS_GEM
;
473 gp
.value
= &gem_kernel
;
475 (void) drmCommandWriteRead(intel
->driFd
, DRM_I915_GETPARAM
, &gp
, sizeof(gp
));
477 /* If we've got a new enough DDX that's initializing GEM and giving us
478 * object handles for the shared buffers, use that.
480 intel
->ttm
= GL_FALSE
;
481 if (intel
->intelScreen
->driScrnPriv
->dri2
.enabled
)
482 gem_supported
= GL_TRUE
;
483 else if (intel
->intelScreen
->driScrnPriv
->ddx_version
.minor
>= 9 &&
485 intel
->intelScreen
->front
.bo_handle
!= -1)
486 gem_supported
= GL_TRUE
;
488 gem_supported
= GL_FALSE
;
490 if (!gem_disable
&& gem_supported
) {
492 intel
->bufmgr
= intel_bufmgr_gem_init(intel
->driFd
,
494 if (intel
->bufmgr
!= NULL
)
495 intel
->ttm
= GL_TRUE
;
497 bo_reuse_mode
= driQueryOptioni(&intel
->optionCache
, "bo_reuse");
498 switch (bo_reuse_mode
) {
499 case DRI_CONF_BO_REUSE_DISABLED
:
501 case DRI_CONF_BO_REUSE_ALL
:
502 intel_bufmgr_gem_enable_reuse(intel
->bufmgr
);
506 /* Otherwise, use the classic buffer manager. */
507 if (intel
->bufmgr
== NULL
) {
509 fprintf(stderr
, "GEM disabled. Using classic.\n");
511 fprintf(stderr
, "Failed to initialize GEM. "
512 "Falling back to classic.\n");
515 if (intelScreen
->tex
.size
== 0) {
516 fprintf(stderr
, "[%s:%u] Error initializing buffer manager.\n",
521 intel
->bufmgr
= intel_bufmgr_fake_init(intelScreen
->tex
.offset
,
522 intelScreen
->tex
.map
,
523 intelScreen
->tex
.size
,
529 /* XXX bufmgr should be per-screen, not per-context */
530 intelScreen
->ttm
= intel
->ttm
;
536 intelInitDriverFunctions(struct dd_function_table
*functions
)
538 _mesa_init_driver_functions(functions
);
540 functions
->Flush
= intelFlush
;
541 functions
->Finish
= intelFinish
;
542 functions
->GetString
= intelGetString
;
543 functions
->UpdateState
= intelInvalidateState
;
545 functions
->CopyColorTable
= _swrast_CopyColorTable
;
546 functions
->CopyColorSubTable
= _swrast_CopyColorSubTable
;
547 functions
->CopyConvolutionFilter1D
= _swrast_CopyConvolutionFilter1D
;
548 functions
->CopyConvolutionFilter2D
= _swrast_CopyConvolutionFilter2D
;
550 functions
->BeginQuery
= intelBeginQuery
;
551 functions
->EndQuery
= intelEndQuery
;
553 intelInitTextureFuncs(functions
);
554 intelInitStateFuncs(functions
);
555 intelInitBufferFuncs(functions
);
556 intelInitPixelFuncs(functions
);
561 intelInitContext(struct intel_context
*intel
,
562 const __GLcontextModes
* mesaVis
,
563 __DRIcontextPrivate
* driContextPriv
,
564 void *sharedContextPrivate
,
565 struct dd_function_table
*functions
)
567 GLcontext
*ctx
= &intel
->ctx
;
568 GLcontext
*shareCtx
= (GLcontext
*) sharedContextPrivate
;
569 __DRIscreenPrivate
*sPriv
= driContextPriv
->driScreenPriv
;
570 intelScreenPrivate
*intelScreen
= (intelScreenPrivate
*) sPriv
->private;
571 volatile struct drm_i915_sarea
*saPriv
= (struct drm_i915_sarea
*)
572 (((GLubyte
*) sPriv
->pSAREA
) + intelScreen
->sarea_priv_offset
);
575 if (!_mesa_initialize_context(&intel
->ctx
, mesaVis
, shareCtx
,
576 functions
, (void *) intel
)) {
577 _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__
);
581 driContextPriv
->driverPrivate
= intel
;
582 intel
->intelScreen
= intelScreen
;
583 intel
->driScreen
= sPriv
;
584 intel
->sarea
= saPriv
;
587 intel
->hHWContext
= driContextPriv
->hHWContext
;
588 intel
->driFd
= sPriv
->fd
;
589 intel
->driHwLock
= sPriv
->lock
;
591 intel
->width
= intelScreen
->width
;
592 intel
->height
= intelScreen
->height
;
594 driParseConfigFiles(&intel
->optionCache
, &intelScreen
->optionCache
,
595 intel
->driScreen
->myNum
,
596 IS_965(intelScreen
->deviceID
) ? "i965" : "i915");
597 if (intelScreen
->deviceID
== PCI_CHIP_I865_G
)
598 intel
->maxBatchSize
= 4096;
600 intel
->maxBatchSize
= BATCH_SZ
;
602 if (!intel_init_bufmgr(intel
))
605 ctx
->Const
.MaxTextureMaxAnisotropy
= 2.0;
607 /* This doesn't yet catch all non-conformant rendering, but it's a
610 if (getenv("INTEL_STRICT_CONFORMANCE")) {
611 intel
->strict_conformance
= 1;
614 if (intel
->strict_conformance
) {
615 ctx
->Const
.MinLineWidth
= 1.0;
616 ctx
->Const
.MinLineWidthAA
= 1.0;
617 ctx
->Const
.MaxLineWidth
= 1.0;
618 ctx
->Const
.MaxLineWidthAA
= 1.0;
619 ctx
->Const
.LineWidthGranularity
= 1.0;
622 ctx
->Const
.MinLineWidth
= 1.0;
623 ctx
->Const
.MinLineWidthAA
= 1.0;
624 ctx
->Const
.MaxLineWidth
= 5.0;
625 ctx
->Const
.MaxLineWidthAA
= 5.0;
626 ctx
->Const
.LineWidthGranularity
= 0.5;
629 ctx
->Const
.MinPointSize
= 1.0;
630 ctx
->Const
.MinPointSizeAA
= 1.0;
631 ctx
->Const
.MaxPointSize
= 255.0;
632 ctx
->Const
.MaxPointSizeAA
= 3.0;
633 ctx
->Const
.PointSizeGranularity
= 1.0;
635 /* reinitialize the context point state.
636 * It depend on constants in __GLcontextRec::Const
638 _mesa_init_point(ctx
);
640 ctx
->Const
.MaxColorAttachments
= 4; /* XXX FBO: review this */
642 /* Initialize the software rasterizer and helper modules. */
643 _swrast_CreateContext(ctx
);
644 _vbo_CreateContext(ctx
);
645 _tnl_CreateContext(ctx
);
646 _swsetup_CreateContext(ctx
);
648 /* Configure swrast to match hardware characteristics: */
649 _swrast_allow_pixel_fog(ctx
, GL_FALSE
);
650 _swrast_allow_vertex_fog(ctx
, GL_TRUE
);
652 intel
->hw_stencil
= mesaVis
->stencilBits
&& mesaVis
->depthBits
== 24;
653 intel
->hw_stipple
= 1;
655 /* XXX FBO: this doesn't seem to be used anywhere */
656 switch (mesaVis
->depthBits
) {
657 case 0: /* what to do in this case? */
659 intel
->polygon_offset_scale
= 1.0;
662 intel
->polygon_offset_scale
= 2.0; /* req'd to pass glean */
669 if (IS_965(intelScreen
->deviceID
))
670 intel
->polygon_offset_scale
/= 0xffff;
672 intel
->RenderIndex
= ~0;
674 fthrottle_mode
= driQueryOptioni(&intel
->optionCache
, "fthrottle_mode");
675 intel
->irqsEmitted
= 0;
677 intel
->do_irqs
= (intel
->intelScreen
->irq_active
&&
678 fthrottle_mode
== DRI_CONF_FTHROTTLE_IRQS
);
680 intel
->do_usleeps
= (fthrottle_mode
== DRI_CONF_FTHROTTLE_USLEEPS
);
682 _math_matrix_ctr(&intel
->ViewportMatrix
);
684 if (IS_965(intelScreen
->deviceID
) && !intel
->intelScreen
->irq_active
) {
685 _mesa_printf("IRQs not active. Exiting\n");
689 intelInitExtensions(ctx
, GL_FALSE
);
691 INTEL_DEBUG
= driParseDebugString(getenv("INTEL_DEBUG"), debug_control
);
692 if (INTEL_DEBUG
& DEBUG_BUFMGR
)
693 dri_bufmgr_set_debug(intel
->bufmgr
, GL_TRUE
);
695 if (!sPriv
->dri2
.enabled
)
696 intel_recreate_static_regions(intel
);
698 intel
->batch
= intel_batchbuffer_alloc(intel
);
700 intel_bufferobj_init(intel
);
701 intel_fbo_init(intel
);
703 if (intel
->ctx
.Mesa_DXTn
) {
704 _mesa_enable_extension(ctx
, "GL_EXT_texture_compression_s3tc");
705 _mesa_enable_extension(ctx
, "GL_S3_s3tc");
707 else if (driQueryOptionb(&intel
->optionCache
, "force_s3tc_enable")) {
708 _mesa_enable_extension(ctx
, "GL_EXT_texture_compression_s3tc");
711 intel
->prim
.primitive
= ~0;
713 /* Force all software fallbacks */
714 if (driQueryOptionb(&intel
->optionCache
, "no_rast")) {
715 fprintf(stderr
, "disabling 3D rasterization\n");
719 /* Disable all hardware rendering (skip emitting batches and fences/waits
722 intel
->no_hw
= getenv("INTEL_NO_HW") != NULL
;
728 intelDestroyContext(__DRIcontextPrivate
* driContextPriv
)
730 struct intel_context
*intel
=
731 (struct intel_context
*) driContextPriv
->driverPrivate
;
733 assert(intel
); /* should never be null */
735 GLboolean release_texture_heaps
;
737 INTEL_FIREVERTICES(intel
);
739 intel
->vtbl
.destroy(intel
);
741 release_texture_heaps
= (intel
->ctx
.Shared
->RefCount
== 1);
742 _swsetup_DestroyContext(&intel
->ctx
);
743 _tnl_DestroyContext(&intel
->ctx
);
744 _vbo_DestroyContext(&intel
->ctx
);
746 _swrast_DestroyContext(&intel
->ctx
);
747 intel
->Fallback
= 0; /* don't call _swrast_Flush later */
749 intel_batchbuffer_free(intel
->batch
);
750 free(intel
->prim
.vb
);
752 if (release_texture_heaps
) {
753 /* This share group is about to go away, free our private
754 * texture object data.
756 if (INTEL_DEBUG
& DEBUG_TEXTURE
)
757 fprintf(stderr
, "do something to free texture heaps\n");
760 /* free the Mesa context */
761 _mesa_free_context_data(&intel
->ctx
);
763 dri_bufmgr_destroy(intel
->bufmgr
);
768 intelUnbindContext(__DRIcontextPrivate
* driContextPriv
)
774 intelMakeCurrent(__DRIcontextPrivate
* driContextPriv
,
775 __DRIdrawablePrivate
* driDrawPriv
,
776 __DRIdrawablePrivate
* driReadPriv
)
778 __DRIscreenPrivate
*psp
= driDrawPriv
->driScreenPriv
;
780 if (driContextPriv
) {
781 struct intel_context
*intel
=
782 (struct intel_context
*) driContextPriv
->driverPrivate
;
783 struct intel_framebuffer
*intel_fb
=
784 (struct intel_framebuffer
*) driDrawPriv
->driverPrivate
;
785 GLframebuffer
*readFb
= (GLframebuffer
*) driReadPriv
->driverPrivate
;
788 /* XXX FBO temporary fix-ups! */
789 /* if the renderbuffers don't have regions, init them from the context */
790 if (!driContextPriv
->driScreenPriv
->dri2
.enabled
) {
791 struct intel_renderbuffer
*irbDepth
792 = intel_get_renderbuffer(&intel_fb
->Base
, BUFFER_DEPTH
);
793 struct intel_renderbuffer
*irbStencil
794 = intel_get_renderbuffer(&intel_fb
->Base
, BUFFER_STENCIL
);
796 if (intel_fb
->color_rb
[0]) {
797 intel_renderbuffer_set_region(intel_fb
->color_rb
[0],
798 intel
->front_region
);
800 if (intel_fb
->color_rb
[1]) {
801 intel_renderbuffer_set_region(intel_fb
->color_rb
[1],
805 if (intel_fb
->color_rb
[2]) {
806 intel_renderbuffer_set_region(intel_fb
->color_rb
[2],
807 intel
->third_region
);
811 intel_renderbuffer_set_region(irbDepth
, intel
->depth_region
);
814 intel_renderbuffer_set_region(irbStencil
, intel
->depth_region
);
818 /* set GLframebuffer size to match window, if needed */
819 driUpdateFramebufferSize(&intel
->ctx
, driDrawPriv
);
821 if (driReadPriv
!= driDrawPriv
) {
822 driUpdateFramebufferSize(&intel
->ctx
, driReadPriv
);
825 _mesa_make_current(&intel
->ctx
, &intel_fb
->Base
, readFb
);
827 /* The drawbuffer won't always be updated by _mesa_make_current:
829 if (intel
->ctx
.DrawBuffer
== &intel_fb
->Base
) {
831 if (intel
->driReadDrawable
!= driReadPriv
)
832 intel
->driReadDrawable
= driReadPriv
;
834 if (intel
->driDrawable
!= driDrawPriv
) {
835 if (driDrawPriv
->swap_interval
== (unsigned)-1) {
838 driDrawPriv
->vblFlags
= (intel
->intelScreen
->irq_active
!= 0)
839 ? driGetDefaultVBlankFlags(&intel
->optionCache
)
840 : VBLANK_FLAG_NO_IRQ
;
842 (*psp
->systemTime
->getUST
) (&intel_fb
->swap_ust
);
843 driDrawableInitVBlank(driDrawPriv
);
844 intel_fb
->vbl_waited
= driDrawPriv
->vblSeq
;
846 for (i
= 0; i
< (intel
->intelScreen
->third
.handle
? 3 : 2); i
++) {
847 if (intel_fb
->color_rb
[i
])
848 intel_fb
->color_rb
[i
]->vbl_pending
= driDrawPriv
->vblSeq
;
851 intel
->driDrawable
= driDrawPriv
;
852 intelWindowMoved(intel
);
855 intel_draw_buffer(&intel
->ctx
, &intel_fb
->Base
);
859 _mesa_make_current(NULL
, NULL
, NULL
);
866 intelContendedLock(struct intel_context
*intel
, GLuint flags
)
868 __DRIdrawablePrivate
*dPriv
= intel
->driDrawable
;
869 __DRIscreenPrivate
*sPriv
= intel
->driScreen
;
870 volatile struct drm_i915_sarea
*sarea
= intel
->sarea
;
871 int me
= intel
->hHWContext
;
873 drmGetLock(intel
->driFd
, intel
->hHWContext
, flags
);
876 if (INTEL_DEBUG
& DEBUG_LOCK
)
877 _mesa_printf("%s - got contended lock\n", __progname
);
879 /* If the window moved, may need to set a new cliprect now.
881 * NOTE: This releases and regains the hw lock, so all state
882 * checking must be done *after* this call:
885 DRI_VALIDATE_DRAWABLE_INFO(sPriv
, dPriv
);
887 if (sarea
&& sarea
->ctxOwner
!= me
) {
888 if (INTEL_DEBUG
& DEBUG_BUFMGR
) {
889 fprintf(stderr
, "Lost Context: sarea->ctxOwner %x me %x\n",
890 sarea
->ctxOwner
, me
);
892 sarea
->ctxOwner
= me
;
895 /* If the last consumer of the texture memory wasn't us, notify the fake
896 * bufmgr and record the new owner. We should have the memory shared
897 * between contexts of a single fake bufmgr, but this will at least make
898 * things correct for now.
900 if (!intel
->ttm
&& sarea
->texAge
!= intel
->hHWContext
) {
901 sarea
->texAge
= intel
->hHWContext
;
902 intel_bufmgr_fake_contended_lock_take(intel
->bufmgr
);
903 if (INTEL_DEBUG
& DEBUG_BATCH
)
904 intel_decode_context_reset();
905 if (INTEL_DEBUG
& DEBUG_BUFMGR
)
906 fprintf(stderr
, "Lost Textures: sarea->texAge %x hw context %x\n",
907 sarea
->ctxOwner
, intel
->hHWContext
);
910 if (sarea
->width
!= intel
->width
|| sarea
->height
!= intel
->height
) {
911 int numClipRects
= intel
->numClipRects
;
914 * FIXME: Really only need to do this when drawing to a
915 * common back- or front buffer.
919 * This will essentially drop the outstanding batchbuffer on
922 intel
->numClipRects
= 0;
925 _swrast_flush(&intel
->ctx
);
927 if (!IS_965(intel
->intelScreen
->deviceID
))
928 INTEL_FIREVERTICES(intel
);
930 if (intel
->batch
->map
!= intel
->batch
->ptr
)
931 intel_batchbuffer_flush(intel
->batch
);
933 intel
->numClipRects
= numClipRects
;
935 /* force window update */
936 intel
->lastStamp
= 0;
938 intel
->width
= sarea
->width
;
939 intel
->height
= sarea
->height
;
944 if (dPriv
&& intel
->lastStamp
!= dPriv
->lastStamp
) {
945 intelWindowMoved(intel
);
946 intel
->lastStamp
= dPriv
->lastStamp
;
951 _glthread_DECLARE_STATIC_MUTEX(lockMutex
);
953 /* Lock the hardware and validate our state.
955 void LOCK_HARDWARE( struct intel_context
*intel
)
957 __DRIdrawable
*dPriv
= intel
->driDrawable
;
958 __DRIscreen
*sPriv
= intel
->driScreen
;
960 struct intel_framebuffer
*intel_fb
= NULL
;
961 struct intel_renderbuffer
*intel_rb
= NULL
;
963 _glthread_LOCK_MUTEX(lockMutex
);
964 assert(!intel
->locked
);
967 if (intel
->driDrawable
) {
968 intel_fb
= intel
->driDrawable
->driverPrivate
;
972 intel_get_renderbuffer(&intel_fb
->Base
,
973 intel_fb
->Base
._ColorDrawBufferIndexes
[0]);
976 if (intel_rb
&& dPriv
->vblFlags
&&
977 !(dPriv
->vblFlags
& VBLANK_FLAG_NO_IRQ
) &&
978 (intel_fb
->vbl_waited
- intel_rb
->vbl_pending
) > (1<<23)) {
981 vbl
.request
.type
= DRM_VBLANK_ABSOLUTE
;
983 if ( dPriv
->vblFlags
& VBLANK_FLAG_SECONDARY
) {
984 vbl
.request
.type
|= DRM_VBLANK_SECONDARY
;
987 vbl
.request
.sequence
= intel_rb
->vbl_pending
;
988 drmWaitVBlank(intel
->driFd
, &vbl
);
989 intel_fb
->vbl_waited
= vbl
.reply
.sequence
;
992 DRM_CAS(intel
->driHwLock
, intel
->hHWContext
,
993 (DRM_LOCK_HELD
|intel
->hHWContext
), __ret
);
995 if (sPriv
->dri2
.enabled
) {
997 drmGetLock(intel
->driFd
, intel
->hHWContext
, 0);
998 if (__driParseEvents(dPriv
->driContextPriv
, dPriv
)) {
999 intelWindowMoved(intel
);
1000 intel_draw_buffer(&intel
->ctx
, intel
->ctx
.DrawBuffer
);
1003 intelContendedLock( intel
, 0 );
1007 if (INTEL_DEBUG
& DEBUG_LOCK
)
1008 _mesa_printf("%s - locked\n", __progname
);
1012 /* Unlock the hardware using the global current context
1014 void UNLOCK_HARDWARE( struct intel_context
*intel
)
1016 intel
->vtbl
.note_unlock( intel
);
1019 DRM_UNLOCK(intel
->driFd
, intel
->driHwLock
, intel
->hHWContext
);
1021 _glthread_UNLOCK_MUTEX(lockMutex
);
1023 if (INTEL_DEBUG
& DEBUG_LOCK
)
1024 _mesa_printf("%s - unlocked\n", __progname
);
1027 * Nothing should be left in batch outside of LOCK/UNLOCK which references
1030 if (intel
->batch
->cliprect_mode
== REFERENCES_CLIPRECTS
)
1031 intel_batchbuffer_flush(intel
->batch
);