1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
42 #include "tnl/t_pipeline.h"
43 #include "tnl/t_vertex.h"
45 #include "drivers/common/driverfuncs.h"
47 #include "intel_screen.h"
51 #include "intel_chipset.h"
52 #include "intel_buffers.h"
53 #include "intel_tex.h"
54 #include "intel_ioctl.h"
55 #include "intel_batchbuffer.h"
56 #include "intel_blit.h"
57 #include "intel_pixel.h"
58 #include "intel_regions.h"
59 #include "intel_buffer_objects.h"
60 #include "intel_fbo.h"
61 #include "intel_decode.h"
62 #include "intel_bufmgr_ttm.h"
64 #include "drirenderbuffer.h"
67 #include "xmlpool.h" /* for symbolic values of enum-type options */
/* Global bitmask of DEBUG_* flags for driver-wide debug output.
 * Populated at context-creation time from the INTEL_DEBUG environment
 * variable via driParseDebugString() against debug_control[] below.
 * NOTE(review): this chunk is a garbled extraction — the leading number
 * on the next line is residue from the original file's line numbering. */
69 int INTEL_DEBUG
= (0);
72 #define need_GL_NV_point_sprite
73 #define need_GL_ARB_multisample
74 #define need_GL_ARB_point_parameters
75 #define need_GL_ARB_texture_compression
76 #define need_GL_ARB_vertex_buffer_object
77 #define need_GL_ARB_vertex_program
78 #define need_GL_ARB_window_pos
79 #define need_GL_ARB_occlusion_query
80 #define need_GL_EXT_blend_color
81 #define need_GL_EXT_blend_equation_separate
82 #define need_GL_EXT_blend_func_separate
83 #define need_GL_EXT_blend_minmax
84 #define need_GL_EXT_cull_vertex
85 #define need_GL_EXT_fog_coord
86 #define need_GL_EXT_framebuffer_object
87 #define need_GL_EXT_multi_draw_arrays
88 #define need_GL_EXT_secondary_color
89 #define need_GL_NV_vertex_program
90 #define need_GL_ATI_separate_stencil
91 #define need_GL_EXT_point_parameters
92 #define need_GL_VERSION_2_0
93 #define need_GL_VERSION_2_1
94 #define need_GL_ARB_shader_objects
95 #define need_GL_ARB_vertex_shader
97 #include "extension_helper.h"
99 #define DRIVER_DATE "20061102"
/* glGetString() driver hook: returns the VENDOR string, or a RENDERER
 * string built from the PCI device ID plus DRIVER_DATE via
 * driGetRendererString().
 * NOTE(review): extraction has dropped interior lines here — the opening
 * brace, the `name` switch, the GL_VENDOR/GL_RENDERER case labels, the
 * `chipset` local declaration, the first case label (presumably
 * PCI_CHIP_I845_G, given the "845G" string), every `break;`, and the
 * `default:` label before "Unknown Intel Chipset" are all missing.
 * Code below is preserved byte-for-byte; do not treat it as compilable. */
101 static const GLubyte
*
102 intelGetString(GLcontext
* ctx
, GLenum name
)
105 static char buffer
[128];
109 return (GLubyte
*) "Tungsten Graphics, Inc";
/* Map deviceID -> human-readable chipset name for the renderer string. */
113 switch (intel_context(ctx
)->intelScreen
->deviceID
) {
115 chipset
= "Intel(R) 845G";
117 case PCI_CHIP_I830_M
:
118 chipset
= "Intel(R) 830M";
120 case PCI_CHIP_I855_GM
:
121 chipset
= "Intel(R) 852GM/855GM";
123 case PCI_CHIP_I865_G
:
124 chipset
= "Intel(R) 865G";
126 case PCI_CHIP_I915_G
:
127 chipset
= "Intel(R) 915G";
129 case PCI_CHIP_E7221_G
:
130 chipset
= "Intel (R) E7221G (i915)";
132 case PCI_CHIP_I915_GM
:
133 chipset
= "Intel(R) 915GM";
135 case PCI_CHIP_I945_G
:
136 chipset
= "Intel(R) 945G";
138 case PCI_CHIP_I945_GM
:
139 chipset
= "Intel(R) 945GM";
141 case PCI_CHIP_I945_GME
:
142 chipset
= "Intel(R) 945GME";
/* NOTE(review): case labels for the G33/Q35/Q33 assignments below are
 * among the missing lines. */
145 chipset
= "Intel(R) G33";
148 chipset
= "Intel(R) Q35";
151 chipset
= "Intel(R) Q33";
153 case PCI_CHIP_I965_Q
:
154 chipset
= "Intel(R) 965Q";
156 case PCI_CHIP_I965_G
:
157 case PCI_CHIP_I965_G_1
:
158 chipset
= "Intel(R) 965G";
160 case PCI_CHIP_I946_GZ
:
161 chipset
= "Intel(R) 946GZ";
163 case PCI_CHIP_I965_GM
:
164 chipset
= "Intel(R) 965GM";
166 case PCI_CHIP_I965_GME
:
167 chipset
= "Intel(R) 965GME/GLE";
169 case PCI_CHIP_IGD_GM
:
170 chipset
= "Intel(R) Integrated Graphics Device";
173 chipset
= "Unknown Intel Chipset";
/* Compose "<chipset> <date> ..." into the static buffer and return it;
 * static storage means the returned pointer stays valid after return. */
177 (void) driGetRendererString(buffer
, chipset
, DRIVER_DATE
, 0);
178 return (GLubyte
*) buffer
;
/* Extensions enabled for every supported chipset generation.  Entries
 * pair the GL extension string with its dispatch-function table (NULL
 * when no new entry points are needed); consumed by driInitExtensions()
 * in intelInitExtensions() below.
 * NOTE(review): the closing "};" of this array and the "#endif" matching
 * the "#if 1" below are among the lines lost in extraction. */
186 * Extension strings exported by the intel driver.
189 * It appears that ARB_texture_env_crossbar has "disappeared" compared to the
190 * old i830-specific driver.
192 static const struct dri_extension card_extensions
[] = {
193 {"GL_ARB_multisample", GL_ARB_multisample_functions
},
194 {"GL_ARB_multitexture", NULL
},
195 {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions
},
196 {"GL_NV_point_sprite", GL_NV_point_sprite_functions
},
197 {"GL_ARB_texture_border_clamp", NULL
},
198 {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions
},
199 {"GL_ARB_texture_cube_map", NULL
},
200 {"GL_ARB_texture_env_add", NULL
},
201 {"GL_ARB_texture_env_combine", NULL
},
202 {"GL_ARB_texture_env_dot3", NULL
},
203 {"GL_ARB_texture_mirrored_repeat", NULL
},
204 {"GL_ARB_texture_non_power_of_two", NULL
},
205 {"GL_ARB_texture_rectangle", NULL
},
206 {"GL_NV_texture_rectangle", NULL
},
207 {"GL_EXT_texture_rectangle", NULL
},
/* NOTE(review): GL_ARB_point_parameters appears twice (also at orig
 * line 195) — duplicate entry worth confirming against upstream. */
208 {"GL_ARB_point_parameters", NULL
},
209 {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions
},
210 {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions
},
211 {"GL_ARB_window_pos", GL_ARB_window_pos_functions
},
212 {"GL_EXT_blend_color", GL_EXT_blend_color_functions
},
213 {"GL_EXT_blend_equation_separate",
214 GL_EXT_blend_equation_separate_functions
},
215 {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions
},
216 {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions
},
217 {"GL_EXT_blend_logic_op", NULL
},
218 {"GL_EXT_blend_subtract", NULL
},
219 {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions
},
220 {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions
},
221 {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions
},
222 {"GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions
},
223 #if 1 /* XXX FBO temporary? */
224 {"GL_EXT_packed_depth_stencil", NULL
},
226 {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions
},
227 {"GL_EXT_stencil_wrap", NULL
},
228 {"GL_EXT_texture_edge_clamp", NULL
},
229 {"GL_EXT_texture_env_combine", NULL
},
230 {"GL_EXT_texture_env_dot3", NULL
},
231 {"GL_EXT_texture_filter_anisotropic", NULL
},
232 {"GL_EXT_texture_lod_bias", NULL
},
233 {"GL_3DFX_texture_compression_FXT1", NULL
},
234 {"GL_APPLE_client_storage", NULL
},
235 {"GL_MESA_pack_invert", NULL
},
236 {"GL_MESA_ycbcr_texture", NULL
},
237 {"GL_NV_blend_square", NULL
},
238 {"GL_NV_vertex_program", GL_NV_vertex_program_functions
},
239 {"GL_NV_vertex_program1_1", NULL
},
240 { "GL_SGIS_generate_mipmap", NULL
},
/* Extensions enabled only on i965-class ("brw") hardware; applied in
 * intelInitExtensions() when IS_965(deviceID) is true.  GLSL support
 * (shading_language_100/120) pulls in the GL 2.0/2.1 entry points.
 * NOTE(review): closing "};" is among the extraction-dropped lines. */
244 static const struct dri_extension brw_extensions
[] = {
245 { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions
},
246 { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions
},
247 { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions
},
248 { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions
},
249 { "GL_ARB_point_sprite", NULL
},
250 { "GL_ARB_fragment_shader", NULL
},
251 { "GL_ARB_draw_buffers", NULL
},
252 { "GL_ARB_depth_texture", NULL
},
253 { "GL_ARB_fragment_program", NULL
},
254 { "GL_ARB_shadow", NULL
},
255 { "GL_EXT_shadow_funcs", NULL
},
256 /* ARB extn won't work if not enabled */
257 { "GL_SGIX_depth_texture", NULL
},
258 { "GL_ARB_texture_env_crossbar", NULL
},
259 { "GL_EXT_texture_sRGB", NULL
},
/* Occlusion-query extension, enabled in intelInitExtensions() only when
 * the kernel interface supports it (965 with drmMinor >= 8 — see below).
 * NOTE(review): closing "};" lost in extraction. */
263 static const struct dri_extension arb_oc_extensions
[] = {
264 {"GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions
},
/* Extensions that require the TTM memory manager (intel->ttm), since FBOs
 * and PBOs need relocatable buffer objects.
 * NOTE(review): closing "};" lost in extraction. */
268 static const struct dri_extension ttm_extensions
[] = {
269 {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions
},
270 {"GL_ARB_pixel_buffer_object", NULL
},
/* Enable the extension tables above.  Called two ways: with ctx == NULL
 * at screen init to register the full potential extension list, and with
 * a real context to enable only what the hardware/kernel supports.
 * Imaging is force-disabled below regardless of the caller's flag.
 * NOTE(review): opening/closing braces and the first half of the
 * occlusion-query `if` condition (orig lines ~290-291, presumably the
 * i915 drmMinor check) are missing from this extraction. */
275 * Initializes potential list of extensions if ctx == NULL, or actually enables
276 * extensions for a context.
278 void intelInitExtensions(GLcontext
*ctx
, GLboolean enable_imaging
)
280 struct intel_context
*intel
= ctx
?intel_context(ctx
):NULL
;
282 /* Disable imaging extension until convolution is working in teximage paths.
284 enable_imaging
= GL_FALSE
;
286 driInitExtensions(ctx
, card_extensions
, enable_imaging
);
/* TTM-only extensions: enabled for the "potential" list (intel == NULL)
 * or when this context actually has TTM. */
288 if (intel
== NULL
|| intel
->ttm
)
289 driInitExtensions(ctx
, ttm_extensions
, GL_FALSE
);
292 (IS_965(intel
->intelScreen
->deviceID
) &&
293 intel
->intelScreen
->drmMinor
>= 8))
294 driInitExtensions(ctx
, arb_oc_extensions
, GL_FALSE
);
296 if (intel
== NULL
|| IS_965(intel
->intelScreen
->deviceID
))
297 driInitExtensions(ctx
, brw_extensions
, GL_FALSE
);
/* Token table for the INTEL_DEBUG environment variable: each word maps to
 * a DEBUG_* bit OR'd into the global INTEL_DEBUG by driParseDebugString()
 * during intelInitContext().  "sing"/"thre" intentionally share
 * DEBUG_SINGLE_THREAD.
 * NOTE(review): the terminating { NULL, 0 } sentinel and closing "};"
 * are among the extraction-dropped lines. */
300 static const struct dri_debug_control debug_control
[] = {
301 { "tex", DEBUG_TEXTURE
},
302 { "state", DEBUG_STATE
},
303 { "ioctl", DEBUG_IOCTL
},
304 { "blit", DEBUG_BLIT
},
305 { "mip", DEBUG_MIPTREE
},
306 { "fall", DEBUG_FALLBACKS
},
307 { "verb", DEBUG_VERBOSE
},
308 { "bat", DEBUG_BATCH
},
309 { "pix", DEBUG_PIXEL
},
310 { "buf", DEBUG_BUFMGR
},
311 { "reg", DEBUG_REGION
},
313 { "lock", DEBUG_LOCK
},
314 { "sync", DEBUG_SYNC
},
315 { "prim", DEBUG_PRIMS
},
316 { "vert", DEBUG_VERTS
},
317 { "dri", DEBUG_DRI
},
318 { "dma", DEBUG_DMA
},
319 { "san", DEBUG_SANITY
},
320 { "sleep", DEBUG_SLEEP
},
321 { "stats", DEBUG_STATS
},
322 { "tile", DEBUG_TILE
},
323 { "sing", DEBUG_SINGLE_THREAD
},
324 { "thre", DEBUG_SINGLE_THREAD
},
326 { "urb", DEBUG_URB
},
/* UpdateState driver hook: fan out GL state-change notifications to the
 * swrast/swsetup/vbo/tnl helper modules, accumulate the dirty bits in
 * intel->NewGLState for deferred hardware-state emission, and give the
 * chip-specific vtbl a chance to react.
 * NOTE(review): return type, opening brace and closing brace are missing
 * from this extraction. */
333 intelInvalidateState(GLcontext
* ctx
, GLuint new_state
)
335 struct intel_context
*intel
= intel_context(ctx
);
337 _swrast_InvalidateState(ctx
, new_state
);
338 _swsetup_InvalidateState(ctx
, new_state
);
339 _vbo_InvalidateState(ctx
, new_state
);
340 _tnl_InvalidateState(ctx
, new_state
);
341 _tnl_invalidate_vertex_state(ctx
, new_state
);
343 intel
->NewGLState
|= new_state
;
/* Optional per-chip hook; not all generations install one. */
345 if (intel
->vtbl
.invalidate_state
)
346 intel
->vtbl
.invalidate_state( intel
, new_state
);
/* glFlush() driver hook: fire any queued vertices (pre-965 paths only)
 * and submit the batchbuffer if it contains unsubmitted commands
 * (map != ptr means bytes have been written since the last flush).
 * NOTE(review): return type, braces, and whatever followed the trailing
 * MI_FLUSH comment were dropped in extraction. */
351 intelFlush(GLcontext
* ctx
)
353 struct intel_context
*intel
= intel_context(ctx
);
358 if (!IS_965(intel
->intelScreen
->deviceID
))
359 INTEL_FIREVERTICES(intel
);
361 if (intel
->batch
->map
!= intel
->batch
->ptr
)
362 intel_batchbuffer_flush(intel
->batch
);
364 /* XXX: Need to do an MI_FLUSH here.
/* glFinish() driver hook: block until the most recently emitted fence
 * signals, then drop our reference to it.  Presumably preceded by a
 * flush in the missing lines (orig line 372) — TODO confirm upstream.
 * NOTE(review): return type, braces missing from extraction. */
369 intelFinish(GLcontext
* ctx
)
371 struct intel_context
*intel
= intel_context(ctx
);
373 if (intel
->batch
->last_fence
) {
374 dri_fence_wait(intel
->batch
->last_fence
);
375 dri_fence_unreference(intel
->batch
->last_fence
);
376 intel
->batch
->last_fence
= NULL
;
/* BeginQuery driver hook (GL_ARB_occlusion_query): snapshot the hardware
 * PS_DEPTH_COUNT register via the DRM_I915_MMIO ioctl after draining the
 * pipeline with intelFinish(), so EndQuery can compute the delta.
 * NOTE(review): the io.buf setup, the local receiving the readback, the
 * LOCK/UNLOCK_HARDWARE calls and braces are among the missing lines;
 * target/q are presumably consumed in those — verify against upstream. */
381 intelBeginQuery(GLcontext
*ctx
, GLenum target
, struct gl_query_object
*q
)
383 struct intel_context
*intel
= intel_context( ctx
);
384 struct drm_i915_mmio io
= {
385 .read_write
= I915_MMIO_READ
,
386 .reg
= MMIO_REGS_PS_DEPTH_COUNT
,
/* Drain all outstanding rendering before sampling the counter. */
390 intelFinish(&intel
->ctx
);
391 drmCommandWrite(intel
->driFd
, DRM_I915_MMIO
, &io
, sizeof(io
));
/* EndQuery driver hook: re-read PS_DEPTH_COUNT after a pipeline drain and
 * store the difference from the BeginQuery sample into q->Result.
 * NOTE(review): the declaration of `tmp` (the readback destination) and
 * the io.buf setup are among the lines lost in extraction — `tmp` is
 * referenced below but never visibly defined here. */
395 intelEndQuery(GLcontext
*ctx
, GLenum target
, struct gl_query_object
*q
)
397 struct intel_context
*intel
= intel_context( ctx
);
399 struct drm_i915_mmio io
= {
400 .read_write
= I915_MMIO_READ
,
401 .reg
= MMIO_REGS_PS_DEPTH_COUNT
,
404 intelFinish(&intel
->ctx
);
405 drmCommandWrite(intel
->driFd
, DRM_I915_MMIO
, &io
, sizeof(io
));
/* Counter delta since BeginQuery = samples that passed the depth test. */
406 q
->Result
= tmp
- q
->Result
;
/* NOTE(review): the return type, braces, the `fence` local declaration,
 * and the `return fence;` are missing from this extraction — presumably
 * returns the IRQ cookie from intelEmitIrqLocked(); confirm upstream. */
411 /** Driver-specific fence emit implementation for the fake memory manager. */
413 intel_fence_emit(void *private)
415 struct intel_context
*intel
= (struct intel_context
*)private;
418 /* XXX: Need to emit a flush, if we haven't already (at least with the
419 * current batchbuffer implementation, we have).
422 fence
= intelEmitIrqLocked(intel
);
/* NOTE(review): return type and braces missing from this extraction;
 * body simply blocks on the IRQ cookie previously returned by
 * intel_fence_emit(). */
427 /** Driver-specific fence wait implementation for the fake memory manager. */
429 intel_fence_wait(void *private, unsigned int cookie
)
431 struct intel_context
*intel
= (struct intel_context
*)private;
433 intelWaitIrq(intel
, cookie
);
/* Choose and initialize the buffer manager for this context: prefer the
 * kernel TTM manager when the DDX/DRM are new enough (or DRI2 is on) and
 * INTEL_NO_TTM is not set; otherwise fall back to the user-space "fake"
 * bufmgr carved out of the static texture aperture.  Sets intel->bufmgr
 * and intel->ttm.
 * NOTE(review): several lines are missing — the return type, braces, the
 * `else` before the GL_FALSE assignment, most arguments to
 * intel_bufmgr_ttm_init() and dri_bufmgr_fake_init() (including the
 * fence-emit/wait callbacks, presumably intel_fence_emit/intel_fence_wait
 * above), the bo_reuse `break;`s, the `bo_reuse_mode` declaration, and
 * the failure `return GL_FALSE`/final `return GL_TRUE`. */
439 intel_init_bufmgr(struct intel_context
*intel
)
441 intelScreenPrivate
*intelScreen
= intel
->intelScreen
;
442 GLboolean ttm_disable
= getenv("INTEL_NO_TTM") != NULL
;
443 GLboolean ttm_supported
;
445 /* If we've got a new enough DDX that's initializing TTM and giving us
446 * object handles for the shared buffers, use that.
448 intel
->ttm
= GL_FALSE
;
449 if (intel
->intelScreen
->driScrnPriv
->dri2
.enabled
)
450 ttm_supported
= GL_TRUE
;
451 else if (intel
->intelScreen
->driScrnPriv
->ddx_version
.minor
>= 9 &&
452 intel
->intelScreen
->drmMinor
>= 11 &&
453 intel
->intelScreen
->front
.bo_handle
!= -1)
454 ttm_supported
= GL_TRUE
;
456 ttm_supported
= GL_FALSE
;
458 if (!ttm_disable
&& ttm_supported
) {
460 intel
->bufmgr
= intel_bufmgr_ttm_init(intel
->driFd
,
463 DRM_I915_FENCE_TYPE_RW
,
465 if (intel
->bufmgr
!= NULL
)
466 intel
->ttm
= GL_TRUE
;
/* Honor the driconf "bo_reuse" option for the TTM bufmgr. */
468 bo_reuse_mode
= driQueryOptioni(&intel
->optionCache
, "bo_reuse");
469 switch (bo_reuse_mode
) {
470 case DRI_CONF_BO_REUSE_DISABLED
:
472 case DRI_CONF_BO_REUSE_ALL
:
473 intel_ttm_enable_bo_reuse(intel
->bufmgr
);
477 /* Otherwise, use the classic buffer manager. */
478 if (intel
->bufmgr
== NULL
) {
480 fprintf(stderr
, "TTM buffer manager disabled. Using classic.\n");
482 fprintf(stderr
, "Failed to initialize TTM buffer manager. "
483 "Falling back to classic.\n");
/* Classic path needs the static texture aperture mapped by the DDX. */
486 if (intelScreen
->tex
.size
== 0) {
487 fprintf(stderr
, "[%s:%u] Error initializing buffer manager.\n",
492 intel
->bufmgr
= dri_bufmgr_fake_init(intelScreen
->tex
.offset
,
493 intelScreen
->tex
.map
,
494 intelScreen
->tex
.size
,
/* Fill in the dd_function_table with this driver's hooks: start from the
 * Mesa software defaults, override the core entry points, route the
 * color-table/convolution copies through swrast, install the occlusion
 * query hooks, then let the texture/state/buffer modules install theirs.
 * NOTE(review): return type and braces are missing from extraction. */
504 intelInitDriverFunctions(struct dd_function_table
*functions
)
506 _mesa_init_driver_functions(functions
);
508 functions
->Flush
= intelFlush
;
509 functions
->Finish
= intelFinish
;
510 functions
->GetString
= intelGetString
;
511 functions
->UpdateState
= intelInvalidateState
;
513 functions
->CopyColorTable
= _swrast_CopyColorTable
;
514 functions
->CopyColorSubTable
= _swrast_CopyColorSubTable
;
515 functions
->CopyConvolutionFilter1D
= _swrast_CopyConvolutionFilter1D
;
516 functions
->CopyConvolutionFilter2D
= _swrast_CopyConvolutionFilter2D
;
518 functions
->BeginQuery
= intelBeginQuery
;
519 functions
->EndQuery
= intelEndQuery
;
521 intelInitTextureFuncs(functions
);
522 intelInitStateFuncs(functions
);
523 intelInitBufferFuncs(functions
);
/* Shared context-creation path for the i915 and i965 drivers.  Wires the
 * intel_context to its screen/SAREA/DRI plumbing, initializes the Mesa
 * context and the swrast/vbo/tnl/swsetup helper modules, sets hardware
 * limits (line/point sizes, batch size, anisotropy), picks a buffer
 * manager, enables extensions, allocates the batchbuffer, and applies
 * driconf/environment overrides (no_rast, INTEL_NO_HW, INTEL_DEBUG).
 * NOTE(review): return type, braces, several locals (`fthrottle_mode`,
 * `bo_reuse_mode`), the error `return GL_FALSE;` paths after init
 * failures, various case labels in the depthBits switch, and the final
 * `return GL_TRUE;` are missing from this extraction — preserved
 * byte-for-byte, not compilable as shown. */
528 intelInitContext(struct intel_context
*intel
,
529 const __GLcontextModes
* mesaVis
,
530 __DRIcontextPrivate
* driContextPriv
,
531 void *sharedContextPrivate
,
532 struct dd_function_table
*functions
)
534 GLcontext
*ctx
= &intel
->ctx
;
535 GLcontext
*shareCtx
= (GLcontext
*) sharedContextPrivate
;
536 __DRIscreenPrivate
*sPriv
= driContextPriv
->driScreenPriv
;
537 intelScreenPrivate
*intelScreen
= (intelScreenPrivate
*) sPriv
->private;
/* SAREA is shared with the X server/kernel; hence volatile. */
538 volatile struct drm_i915_sarea
*saPriv
= (struct drm_i915_sarea
*)
539 (((GLubyte
*) sPriv
->pSAREA
) + intelScreen
->sarea_priv_offset
);
542 if (!_mesa_initialize_context(&intel
->ctx
, mesaVis
, shareCtx
,
543 functions
, (void *) intel
)) {
544 _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__
);
548 driContextPriv
->driverPrivate
= intel
;
549 intel
->intelScreen
= intelScreen
;
550 intel
->driScreen
= sPriv
;
551 intel
->sarea
= saPriv
;
554 intel
->hHWContext
= driContextPriv
->hHWContext
;
555 intel
->driFd
= sPriv
->fd
;
556 intel
->driHwLock
= sPriv
->lock
;
558 intel
->width
= intelScreen
->width
;
559 intel
->height
= intelScreen
->height
;
561 driParseConfigFiles(&intel
->optionCache
, &intelScreen
->optionCache
,
562 intel
->driScreen
->myNum
,
563 IS_965(intelScreen
->deviceID
) ? "i965" : "i915");
/* 865G gets a smaller batch; other chips use the default BATCH_SZ. */
564 if (intelScreen
->deviceID
== PCI_CHIP_I865_G
)
565 intel
->maxBatchSize
= 4096;
567 intel
->maxBatchSize
= BATCH_SZ
;
569 if (!intel_init_bufmgr(intel
))
572 ctx
->Const
.MaxTextureMaxAnisotropy
= 2.0;
574 /* This doesn't yet catch all non-conformant rendering, but it's a
577 if (getenv("INTEL_STRICT_CONFORMANCE")) {
578 intel
->strict_conformance
= 1;
/* Strict conformance pins wide-line support to the minimum. */
581 if (intel
->strict_conformance
) {
582 ctx
->Const
.MinLineWidth
= 1.0;
583 ctx
->Const
.MinLineWidthAA
= 1.0;
584 ctx
->Const
.MaxLineWidth
= 1.0;
585 ctx
->Const
.MaxLineWidthAA
= 1.0;
586 ctx
->Const
.LineWidthGranularity
= 1.0;
589 ctx
->Const
.MinLineWidth
= 1.0;
590 ctx
->Const
.MinLineWidthAA
= 1.0;
591 ctx
->Const
.MaxLineWidth
= 5.0;
592 ctx
->Const
.MaxLineWidthAA
= 5.0;
593 ctx
->Const
.LineWidthGranularity
= 0.5;
596 ctx
->Const
.MinPointSize
= 1.0;
597 ctx
->Const
.MinPointSizeAA
= 1.0;
598 ctx
->Const
.MaxPointSize
= 255.0;
599 ctx
->Const
.MaxPointSizeAA
= 3.0;
600 ctx
->Const
.PointSizeGranularity
= 1.0;
602 /* reinitialize the context point state.
603 * It depend on constants in __GLcontextRec::Const
605 _mesa_init_point(ctx
);
607 ctx
->Const
.MaxColorAttachments
= 4; /* XXX FBO: review this */
609 /* Initialize the software rasterizer and helper modules. */
610 _swrast_CreateContext(ctx
);
611 _vbo_CreateContext(ctx
);
612 _tnl_CreateContext(ctx
);
613 _swsetup_CreateContext(ctx
);
615 /* Configure swrast to match hardware characteristics: */
616 _swrast_allow_pixel_fog(ctx
, GL_FALSE
);
617 _swrast_allow_vertex_fog(ctx
, GL_TRUE
);
619 intel
->hw_stencil
= mesaVis
->stencilBits
&& mesaVis
->depthBits
== 24;
620 intel
->hw_stipple
= 1;
622 /* XXX FBO: this doesn't seem to be used anywhere */
623 switch (mesaVis
->depthBits
) {
624 case 0: /* what to do in this case? */
626 intel
->polygon_offset_scale
= 1.0;
629 intel
->polygon_offset_scale
= 2.0; /* req'd to pass glean */
636 if (IS_965(intelScreen
->deviceID
))
637 intel
->polygon_offset_scale
/= 0xffff;
639 intel
->RenderIndex
= ~0;
641 fthrottle_mode
= driQueryOptioni(&intel
->optionCache
, "fthrottle_mode");
642 intel
->irqsEmitted
= 0;
644 intel
->do_irqs
= (intel
->intelScreen
->irq_active
&&
645 fthrottle_mode
== DRI_CONF_FTHROTTLE_IRQS
);
647 intel
->do_usleeps
= (fthrottle_mode
== DRI_CONF_FTHROTTLE_USLEEPS
);
649 _math_matrix_ctr(&intel
->ViewportMatrix
);
/* i965 requires working IRQs; refuse to create a context without them. */
651 if (IS_965(intelScreen
->deviceID
) && !intel
->intelScreen
->irq_active
) {
652 _mesa_printf("IRQs not active. Exiting\n");
656 intelInitExtensions(ctx
, GL_FALSE
);
658 INTEL_DEBUG
= driParseDebugString(getenv("INTEL_DEBUG"), debug_control
);
659 if (INTEL_DEBUG
& DEBUG_BUFMGR
)
660 dri_bufmgr_set_debug(intel
->bufmgr
, GL_TRUE
);
/* Under DRI2 the regions come from the loader, not the static SAREA. */
662 if (!sPriv
->dri2
.enabled
)
663 intel_recreate_static_regions(intel
);
665 intel
->batch
= intel_batchbuffer_alloc(intel
);
666 intel
->last_swap_fence
= NULL
;
667 intel
->first_swap_fence
= NULL
;
669 intel_bufferobj_init(intel
);
670 intel_fbo_init(intel
);
/* S3TC: enabled when a decoder (libtxc_dxtn) is present, or forced
 * compression-only via the driconf force_s3tc_enable option. */
672 if (intel
->ctx
.Mesa_DXTn
) {
673 _mesa_enable_extension(ctx
, "GL_EXT_texture_compression_s3tc");
674 _mesa_enable_extension(ctx
, "GL_S3_s3tc");
676 else if (driQueryOptionb(&intel
->optionCache
, "force_s3tc_enable")) {
677 _mesa_enable_extension(ctx
, "GL_EXT_texture_compression_s3tc");
680 intel
->prim
.primitive
= ~0;
682 /* Force all software fallbacks */
683 if (driQueryOptionb(&intel
->optionCache
, "no_rast")) {
684 fprintf(stderr
, "disabling 3D rasterization\n");
685 FALLBACK(intel
, INTEL_FALLBACK_USER
, 1);
689 /* Disable all hardware rendering (skip emitting batches and fences/waits
692 intel
->no_hw
= getenv("INTEL_NO_HW") != NULL
;
/* Context teardown: tear down the chip-specific state (vtbl.destroy),
 * the helper modules, the batchbuffer and any outstanding swap fences,
 * then free the Mesa context data and the buffer manager.
 * NOTE(review): return type, braces, and the trailing FREE/driverPrivate
 * reset are among the lines lost in extraction.  `release_texture_heaps`
 * is declared after `assert(intel)` here — mid-block declaration residue
 * of the same garbling. */
698 intelDestroyContext(__DRIcontextPrivate
* driContextPriv
)
700 struct intel_context
*intel
=
701 (struct intel_context
*) driContextPriv
->driverPrivate
;
703 assert(intel
); /* should never be null */
705 GLboolean release_texture_heaps
;
707 INTEL_FIREVERTICES(intel
);
709 intel
->vtbl
.destroy(intel
);
/* Last reference in the share group -> we own the texture heaps. */
711 release_texture_heaps
= (intel
->ctx
.Shared
->RefCount
== 1);
712 _swsetup_DestroyContext(&intel
->ctx
);
713 _tnl_DestroyContext(&intel
->ctx
);
714 _vbo_DestroyContext(&intel
->ctx
);
716 _swrast_DestroyContext(&intel
->ctx
);
717 intel
->Fallback
= 0; /* don't call _swrast_Flush later */
719 intel_batchbuffer_free(intel
->batch
);
721 if (intel
->last_swap_fence
) {
722 dri_fence_wait(intel
->last_swap_fence
);
723 dri_fence_unreference(intel
->last_swap_fence
);
724 intel
->last_swap_fence
= NULL
;
726 if (intel
->first_swap_fence
) {
727 dri_fence_wait(intel
->first_swap_fence
);
728 dri_fence_unreference(intel
->first_swap_fence
);
729 intel
->first_swap_fence
= NULL
;
732 if (release_texture_heaps
) {
733 /* This share group is about to go away, free our private
734 * texture object data.
736 if (INTEL_DEBUG
& DEBUG_TEXTURE
)
737 fprintf(stderr
, "do something to free texture heaps\n");
740 /* free the Mesa context */
741 _mesa_free_context_data(&intel
->ctx
);
743 dri_bufmgr_destroy(intel
->bufmgr
);
/* Unbind hook for the DRI interface.
 * NOTE(review): return type and entire body (presumably `return GL_TRUE;`)
 * are missing from this extraction — confirm against upstream. */
748 intelUnbindContext(__DRIcontextPrivate
* driContextPriv
)
/* MakeCurrent hook: bind the given context to the draw/read drawables.
 * Under classic DRI (not DRI2), the window-system renderbuffers are
 * patched up here from the context's static regions; then the Mesa
 * context is made current, framebuffer sizes synced to the windows,
 * and per-drawable vblank state (re)initialized on a drawable change.
 * A NULL driContextPriv unbinds (makes nothing current).
 * NOTE(review): return type, braces, the `i` loop-variable declaration,
 * the back_region argument for color_rb[1], the irbDepth/irbStencil
 * guard `if`s, and the final `return GL_TRUE;` are among the missing
 * lines — preserved byte-for-byte, not compilable as shown. */
754 intelMakeCurrent(__DRIcontextPrivate
* driContextPriv
,
755 __DRIdrawablePrivate
* driDrawPriv
,
756 __DRIdrawablePrivate
* driReadPriv
)
758 __DRIscreenPrivate
*psp
= driDrawPriv
->driScreenPriv
;
760 if (driContextPriv
) {
761 struct intel_context
*intel
=
762 (struct intel_context
*) driContextPriv
->driverPrivate
;
763 struct intel_framebuffer
*intel_fb
=
764 (struct intel_framebuffer
*) driDrawPriv
->driverPrivate
;
765 GLframebuffer
*readFb
= (GLframebuffer
*) driReadPriv
->driverPrivate
;
768 /* XXX FBO temporary fix-ups! */
769 /* if the renderbuffers don't have regions, init them from the context */
770 if (!driContextPriv
->driScreenPriv
->dri2
.enabled
) {
771 struct intel_renderbuffer
*irbDepth
772 = intel_get_renderbuffer(&intel_fb
->Base
, BUFFER_DEPTH
);
773 struct intel_renderbuffer
*irbStencil
774 = intel_get_renderbuffer(&intel_fb
->Base
, BUFFER_STENCIL
);
776 if (intel_fb
->color_rb
[0]) {
777 intel_renderbuffer_set_region(intel_fb
->color_rb
[0],
778 intel
->front_region
);
780 if (intel_fb
->color_rb
[1]) {
781 intel_renderbuffer_set_region(intel_fb
->color_rb
[1],
785 if (intel_fb
->color_rb
[2]) {
786 intel_renderbuffer_set_region(intel_fb
->color_rb
[2],
787 intel
->third_region
);
/* Depth and stencil share the single depth region on this hardware. */
791 intel_renderbuffer_set_region(irbDepth
, intel
->depth_region
);
794 intel_renderbuffer_set_region(irbStencil
, intel
->depth_region
);
798 /* set GLframebuffer size to match window, if needed */
799 driUpdateFramebufferSize(&intel
->ctx
, driDrawPriv
);
801 if (driReadPriv
!= driDrawPriv
) {
802 driUpdateFramebufferSize(&intel
->ctx
, driReadPriv
);
805 _mesa_make_current(&intel
->ctx
, &intel_fb
->Base
, readFb
);
807 /* The drawbuffer won't always be updated by _mesa_make_current:
809 if (intel
->ctx
.DrawBuffer
== &intel_fb
->Base
) {
811 if (intel
->driReadDrawable
!= driReadPriv
)
812 intel
->driReadDrawable
= driReadPriv
;
/* Fresh drawable: (re)derive vblank flags and seed the swap state. */
814 if (intel
->driDrawable
!= driDrawPriv
) {
815 if (driDrawPriv
->swap_interval
== (unsigned)-1) {
818 driDrawPriv
->vblFlags
= (intel
->intelScreen
->irq_active
!= 0)
819 ? driGetDefaultVBlankFlags(&intel
->optionCache
)
820 : VBLANK_FLAG_NO_IRQ
;
822 (*psp
->systemTime
->getUST
) (&intel_fb
->swap_ust
);
823 driDrawableInitVBlank(driDrawPriv
);
824 intel_fb
->vbl_waited
= driDrawPriv
->vblSeq
;
/* Two color buffers normally; three when the screen has a third. */
826 for (i
= 0; i
< (intel
->intelScreen
->third
.handle
? 3 : 2); i
++) {
827 if (intel_fb
->color_rb
[i
])
828 intel_fb
->color_rb
[i
]->vbl_pending
= driDrawPriv
->vblSeq
;
831 intel
->driDrawable
= driDrawPriv
;
832 intelWindowMoved(intel
);
835 intel_draw_buffer(&intel
->ctx
, &intel_fb
->Base
);
839 _mesa_make_current(NULL
, NULL
, NULL
);
/* Slow path taken when the hardware lock was contended: re-acquire the
 * DRM lock, revalidate drawable cliprects (which may drop and retake the
 * lock), recover from a lost hardware context, notify the fake bufmgr
 * when another client touched texture memory, and handle screen resize
 * by flushing with zero cliprects so stale rendering is discarded.
 * NOTE(review): return type, braces, and several interior lines are
 * missing from this extraction; preserved byte-for-byte. */
846 intelContendedLock(struct intel_context
*intel
, GLuint flags
)
848 __DRIdrawablePrivate
*dPriv
= intel
->driDrawable
;
849 __DRIscreenPrivate
*sPriv
= intel
->driScreen
;
850 volatile struct drm_i915_sarea
*sarea
= intel
->sarea
;
851 int me
= intel
->hHWContext
;
853 drmGetLock(intel
->driFd
, intel
->hHWContext
, flags
);
856 if (INTEL_DEBUG
& DEBUG_LOCK
)
857 _mesa_printf("%s - got contended lock\n", __progname
);
859 /* If the window moved, may need to set a new cliprect now.
861 * NOTE: This releases and regains the hw lock, so all state
862 * checking must be done *after* this call:
865 DRI_VALIDATE_DRAWABLE_INFO(sPriv
, dPriv
);
/* Another client owned the hardware context since we last held it. */
867 if (sarea
&& sarea
->ctxOwner
!= me
) {
868 if (INTEL_DEBUG
& DEBUG_BUFMGR
) {
869 fprintf(stderr
, "Lost Context: sarea->ctxOwner %x me %x\n",
870 sarea
->ctxOwner
, me
);
872 sarea
->ctxOwner
= me
;
875 /* If the last consumer of the texture memory wasn't us, notify the fake
876 * bufmgr and record the new owner. We should have the memory shared
877 * * between contexts of a single fake bufmgr, but this will at least make
878 * things correct for now.
880 if (!intel
->ttm
&& sarea
->texAge
!= intel
->hHWContext
) {
881 sarea
->texAge
= intel
->hHWContext
;
882 dri_bufmgr_fake_contended_lock_take(intel
->bufmgr
);
883 if (INTEL_DEBUG
& DEBUG_BATCH
)
884 intel_decode_context_reset();
885 if (INTEL_DEBUG
& DEBUG_BUFMGR
)
886 fprintf(stderr
, "Lost Textures: sarea->texAge %x hw context %x\n",
887 sarea
->ctxOwner
, intel
->hHWContext
);
/* Screen was resized: flush with cliprects suppressed so the stale
 * batch can't scribble on the resized front/back buffers. */
890 if (sarea
->width
!= intel
->width
|| sarea
->height
!= intel
->height
) {
891 int numClipRects
= intel
->numClipRects
;
894 * FIXME: Really only need to do this when drawing to a
895 * common back- or front buffer.
899 * This will essentially drop the outstanding batchbuffer on
902 intel
->numClipRects
= 0;
905 _swrast_flush(&intel
->ctx
);
907 if (!IS_965(intel
->intelScreen
->deviceID
))
908 INTEL_FIREVERTICES(intel
);
910 if (intel
->batch
->map
!= intel
->batch
->ptr
)
911 intel_batchbuffer_flush(intel
->batch
);
913 intel
->numClipRects
= numClipRects
;
915 /* force window update */
916 intel
->lastStamp
= 0;
918 intel
->width
= sarea
->width
;
919 intel
->height
= sarea
->height
;
/* Drawable stamp changed while unlocked => window moved/restacked. */
924 if (dPriv
&& intel
->lastStamp
!= dPriv
->lastStamp
) {
925 intelWindowMoved(intel
);
926 intel
->lastStamp
= dPriv
->lastStamp
;
/* Process-wide mutex serializing LOCK_HARDWARE/UNLOCK_HARDWARE across
 * threads; the DRM lock below serializes across processes. */
931 _glthread_DECLARE_STATIC_MUTEX(lockMutex
);
/* Acquire the hardware lock: take the intra-process mutex, throttle
 * against pending vblank swaps for the current draw buffer, then try the
 * lock-free DRM_CAS fast path; on contention fall back to
 * intelContendedLock() (or plain drmGetLock + event parsing under DRI2).
 * NOTE(review): braces, the `__ret` declaration, the assignment of
 * intel_rb from the intel_get_renderbuffer() call, the `vbl` struct
 * declaration, the `if (__ret)` contention check, and the
 * `intel->locked = 1` are among the lines lost in extraction. */
933 /* Lock the hardware and validate our state.
935 void LOCK_HARDWARE( struct intel_context
*intel
)
937 __DRIdrawable
*dPriv
= intel
->driDrawable
;
938 __DRIscreen
*sPriv
= intel
->driScreen
;
940 struct intel_framebuffer
*intel_fb
= NULL
;
941 struct intel_renderbuffer
*intel_rb
= NULL
;
943 _glthread_LOCK_MUTEX(lockMutex
);
944 assert(!intel
->locked
);
947 if (intel
->driDrawable
) {
948 intel_fb
= intel
->driDrawable
->driverPrivate
;
952 intel_get_renderbuffer(&intel_fb
->Base
,
953 intel_fb
->Base
._ColorDrawBufferIndexes
[0]);
/* Wait for an outstanding page flip/swap on this buffer; the (1<<23)
 * window treats the unsigned sequence difference as "pending ahead". */
956 if (intel_rb
&& dPriv
->vblFlags
&&
957 !(dPriv
->vblFlags
& VBLANK_FLAG_NO_IRQ
) &&
958 (intel_fb
->vbl_waited
- intel_rb
->vbl_pending
) > (1<<23)) {
961 vbl
.request
.type
= DRM_VBLANK_ABSOLUTE
;
963 if ( dPriv
->vblFlags
& VBLANK_FLAG_SECONDARY
) {
964 vbl
.request
.type
|= DRM_VBLANK_SECONDARY
;
967 vbl
.request
.sequence
= intel_rb
->vbl_pending
;
968 drmWaitVBlank(intel
->driFd
, &vbl
);
969 intel_fb
->vbl_waited
= vbl
.reply
.sequence
;
/* Fast path: uncontended compare-and-swap on the shared DRM lock. */
972 DRM_CAS(intel
->driHwLock
, intel
->hHWContext
,
973 (DRM_LOCK_HELD
|intel
->hHWContext
), __ret
);
975 if (sPriv
->dri2
.enabled
) {
977 drmGetLock(intel
->driFd
, intel
->hHWContext
, 0);
978 if (__driParseEvents(dPriv
->driContextPriv
, dPriv
)) {
979 intelWindowMoved(intel
);
980 intel_draw_buffer(&intel
->ctx
, intel
->ctx
.DrawBuffer
);
983 intelContendedLock( intel
, 0 );
987 if (INTEL_DEBUG
& DEBUG_LOCK
)
988 _mesa_printf("%s - locked\n", __progname
);
/* Release the hardware lock: notify the chip-specific vtbl, drop the DRM
 * lock, then release the intra-process mutex (reverse of LOCK_HARDWARE).
 * NOTE(review): braces and the `intel->locked = 0` reset are missing;
 * the trailing assert (orig line 1010) sits after the debug print and —
 * given the comment at orig 1007 — may belong to a separate function
 * whose header line was dropped in extraction.  Verify upstream. */
992 /* Unlock the hardware using the global current context
994 void UNLOCK_HARDWARE( struct intel_context
*intel
)
996 intel
->vtbl
.note_unlock( intel
);
999 DRM_UNLOCK(intel
->driFd
, intel
->driHwLock
, intel
->hHWContext
);
1001 _glthread_UNLOCK_MUTEX(lockMutex
);
1003 if (INTEL_DEBUG
& DEBUG_LOCK
)
1004 _mesa_printf("%s - unlocked\n", __progname
);
1007 * Nothing should be left in batch outside of LOCK/UNLOCK which references
1010 assert(intel
->batch
->cliprect_mode
!= REFERENCES_CLIPRECTS
);