1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
43 #include "tnl/t_pipeline.h"
44 #include "tnl/t_vertex.h"
46 #include "drivers/common/driverfuncs.h"
48 #include "intel_screen.h"
49 #include "intel_chipset.h"
52 #include "i830_common.h"
54 #include "intel_tex.h"
55 #include "intel_span.h"
56 #include "intel_ioctl.h"
57 #include "intel_batchbuffer.h"
58 #include "intel_blit.h"
59 #include "intel_regions.h"
60 #include "intel_buffers.h"
61 #include "intel_buffer_objects.h"
62 #include "intel_decode.h"
63 #include "intel_fbo.h"
64 #include "intel_bufmgr_ttm.h"
66 #include "drirenderbuffer.h"
/* Bitmask of DEBUG_* flags, parsed from $INTEL_DEBUG in intelInitContext(). */
int INTEL_DEBUG = 0;
75 #define need_GL_NV_point_sprite
76 #define need_GL_ARB_multisample
77 #define need_GL_ARB_point_parameters
78 #define need_GL_ARB_texture_compression
79 #define need_GL_ARB_vertex_buffer_object
80 #define need_GL_ARB_vertex_program
81 #define need_GL_ARB_window_pos
82 #define need_GL_ARB_occlusion_query
83 #define need_GL_EXT_blend_color
84 #define need_GL_EXT_blend_equation_separate
85 #define need_GL_EXT_blend_func_separate
86 #define need_GL_EXT_blend_minmax
87 #define need_GL_EXT_cull_vertex
88 #define need_GL_EXT_fog_coord
89 #define need_GL_EXT_framebuffer_object
90 #define need_GL_EXT_multi_draw_arrays
91 #define need_GL_EXT_secondary_color
92 #define need_GL_ATI_separate_stencil
93 #define need_GL_EXT_point_parameters
94 #define need_GL_VERSION_2_0
95 #define need_GL_VERSION_2_1
96 #define need_GL_ARB_shader_objects
97 #define need_GL_ARB_vertex_shader
99 #include "extension_helper.h"
105 /***************************************
106 * Mesa's Driver Functions
107 ***************************************/
109 #define DRIVER_VERSION "4.1.3002"
111 static const GLubyte
*intelGetString( GLcontext
*ctx
, GLenum name
)
113 const char * chipset
;
114 static char buffer
[128];
118 return (GLubyte
*)"Tungsten Graphics, Inc";
122 switch (intel_context(ctx
)->intelScreen
->deviceID
) {
123 case PCI_CHIP_I965_Q
:
124 chipset
= "Intel(R) 965Q";
126 case PCI_CHIP_I965_G
:
127 case PCI_CHIP_I965_G_1
:
128 chipset
= "Intel(R) 965G";
130 case PCI_CHIP_I946_GZ
:
131 chipset
= "Intel(R) 946GZ";
133 case PCI_CHIP_I965_GM
:
134 chipset
= "Intel(R) 965GM";
136 case PCI_CHIP_I965_GME
:
137 chipset
= "Intel(R) 965GME/GLE";
140 chipset
= "Unknown Intel Chipset";
143 (void) driGetRendererString( buffer
, chipset
, DRIVER_VERSION
, 0 );
144 return (GLubyte
*) buffer
;
153 * Extension strings exported by the intel driver.
156 * It appears that ARB_texture_env_crossbar has "disappeared" compared to the
157 * old i830-specific driver.
159 const struct dri_extension card_extensions
[] =
161 { "GL_ARB_multisample", GL_ARB_multisample_functions
},
162 { "GL_ARB_multitexture", NULL
},
163 { "GL_ARB_point_parameters", GL_ARB_point_parameters_functions
},
164 { "GL_NV_point_sprite", GL_NV_point_sprite_functions
},
165 { "GL_ARB_texture_border_clamp", NULL
},
166 { "GL_ARB_texture_compression", GL_ARB_texture_compression_functions
},
167 { "GL_ARB_texture_cube_map", NULL
},
168 { "GL_ARB_texture_env_add", NULL
},
169 { "GL_ARB_texture_env_combine", NULL
},
170 { "GL_ARB_texture_env_dot3", NULL
},
171 { "GL_ARB_texture_mirrored_repeat", NULL
},
172 { "GL_ARB_texture_non_power_of_two", NULL
},
173 { "GL_ARB_texture_rectangle", NULL
},
174 { "GL_NV_texture_rectangle", NULL
},
175 { "GL_EXT_texture_rectangle", NULL
},
176 { "GL_ARB_texture_rectangle", NULL
},
177 { "GL_ARB_point_sprite", NULL
},
178 { "GL_ARB_point_parameters", NULL
},
179 { "GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions
},
180 { "GL_ARB_vertex_program", GL_ARB_vertex_program_functions
},
181 { "GL_ARB_window_pos", GL_ARB_window_pos_functions
},
182 { "GL_EXT_blend_color", GL_EXT_blend_color_functions
},
183 { "GL_EXT_blend_equation_separate", GL_EXT_blend_equation_separate_functions
},
184 { "GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions
},
185 { "GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions
},
186 { "GL_EXT_blend_logic_op", NULL
},
187 { "GL_EXT_blend_subtract", NULL
},
188 { "GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions
},
189 { "GL_EXT_fog_coord", GL_EXT_fog_coord_functions
},
190 { "GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions
},
191 { "GL_EXT_secondary_color", GL_EXT_secondary_color_functions
},
192 { "GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions
},
193 { "GL_EXT_stencil_wrap", NULL
},
194 /* Do not enable this extension. It conflicts with GL_ATI_separate_stencil
195 * and 2.0's separate stencil, because mesa's computed _TestTwoSide will
196 * only reflect whether it's enabled through this extension, even if the
197 * application is using the other interfaces.
199 /*{ "GL_EXT_stencil_two_side", GL_EXT_stencil_two_side_functions },*/
200 { "GL_EXT_texture_edge_clamp", NULL
},
201 { "GL_EXT_texture_env_combine", NULL
},
202 { "GL_EXT_texture_env_dot3", NULL
},
203 { "GL_EXT_texture_filter_anisotropic", NULL
},
204 { "GL_EXT_texture_lod_bias", NULL
},
205 { "GL_EXT_texture_sRGB", NULL
},
206 { "GL_3DFX_texture_compression_FXT1", NULL
},
207 { "GL_APPLE_client_storage", NULL
},
208 { "GL_MESA_pack_invert", NULL
},
209 { "GL_MESA_ycbcr_texture", NULL
},
210 { "GL_NV_blend_square", NULL
},
211 { "GL_SGIS_generate_mipmap", NULL
},
212 { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions
},
213 { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions
},
214 { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions
},
215 { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions
},
216 { "GL_ARB_fragment_shader", NULL
},
217 { "GL_ARB_draw_buffers", NULL
},
221 const struct dri_extension ttm_extensions
[] = {
222 {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions
},
223 {"GL_ARB_pixel_buffer_object", NULL
},
227 const struct dri_extension arb_oc_extension
=
228 { "GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions
};
231 * Initializes potential list of extensions if ctx == NULL, or actually enables
232 * extensions for a context.
234 void intelInitExtensions(GLcontext
*ctx
, GLboolean enable_imaging
)
236 struct intel_context
*intel
= ctx
?intel_context(ctx
):NULL
;
238 /* Disable imaging extension until convolution is working in teximage paths.
240 enable_imaging
= GL_FALSE
;
242 driInitExtensions(ctx
, card_extensions
, enable_imaging
);
244 if (intel
== NULL
|| intel
->ttm
)
245 driInitExtensions(ctx
, ttm_extensions
, GL_FALSE
);
247 if (intel
== NULL
|| intel
->intelScreen
->drmMinor
>= 8)
248 driInitSingleExtension(ctx
, &arb_oc_extension
);
251 static const struct dri_debug_control debug_control
[] =
253 { "fall", DEBUG_FALLBACKS
},
254 { "tex", DEBUG_TEXTURE
},
255 { "ioctl", DEBUG_IOCTL
},
256 { "prim", DEBUG_PRIMS
},
257 { "vert", DEBUG_VERTS
},
258 { "state", DEBUG_STATE
},
259 { "verb", DEBUG_VERBOSE
},
260 { "dri", DEBUG_DRI
},
261 { "dma", DEBUG_DMA
},
262 { "san", DEBUG_SANITY
},
263 { "sync", DEBUG_SYNC
},
264 { "sleep", DEBUG_SLEEP
},
265 { "pix", DEBUG_PIXEL
},
266 { "buf", DEBUG_BUFMGR
},
267 { "stats", DEBUG_STATS
},
268 { "tile", DEBUG_TILE
},
269 { "sing", DEBUG_SINGLE_THREAD
},
270 { "thre", DEBUG_SINGLE_THREAD
},
273 { "bat", DEBUG_BATCH
},
274 { "blit", DEBUG_BLIT
},
275 { "mip", DEBUG_MIPTREE
},
276 { "reg", DEBUG_REGION
},
277 { "fbo", DEBUG_FBO
},
282 static void intelInvalidateState( GLcontext
*ctx
, GLuint new_state
)
284 struct intel_context
*intel
= intel_context(ctx
);
286 _swrast_InvalidateState( ctx
, new_state
);
287 _swsetup_InvalidateState( ctx
, new_state
);
288 _vbo_InvalidateState( ctx
, new_state
);
289 _tnl_InvalidateState( ctx
, new_state
);
290 _tnl_invalidate_vertex_state( ctx
, new_state
);
292 intel
->NewGLState
|= new_state
;
294 if (intel
->vtbl
.invalidate_state
)
295 intel
->vtbl
.invalidate_state( intel
, new_state
);
299 void intelFlush( GLcontext
*ctx
)
301 struct intel_context
*intel
= intel_context( ctx
);
303 if (intel
->batch
->map
!= intel
->batch
->ptr
)
304 intel_batchbuffer_flush(intel
->batch
);
307 void intelFinish( GLcontext
*ctx
)
309 struct intel_context
*intel
= intel_context( ctx
);
312 if (intel
->batch
->last_fence
) {
313 dri_fence_wait(intel
->batch
->last_fence
);
314 dri_fence_unreference(intel
->batch
->last_fence
);
315 intel
->batch
->last_fence
= NULL
;
320 intelBeginQuery(GLcontext
*ctx
, GLenum target
, struct gl_query_object
*q
)
322 struct intel_context
*intel
= intel_context( ctx
);
324 .read_write
= MMIO_READ
,
325 .reg
= MMIO_REGS_PS_DEPTH_COUNT
,
329 intelFinish(&intel
->ctx
);
330 drmCommandWrite(intel
->driFd
, DRM_I830_MMIO
, &io
, sizeof(io
));
334 intelEndQuery(GLcontext
*ctx
, GLenum target
, struct gl_query_object
*q
)
336 struct intel_context
*intel
= intel_context( ctx
);
339 .read_write
= MMIO_READ
,
340 .reg
= MMIO_REGS_PS_DEPTH_COUNT
,
343 intelFinish(&intel
->ctx
);
344 drmCommandWrite(intel
->driFd
, DRM_I830_MMIO
, &io
, sizeof(io
));
345 q
->Result
= tmp
- q
->Result
;
/** Driver-specific fence emit implementation for the fake memory manager. */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *)private;
   unsigned int fence;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */

   fence = intelEmitIrqLocked(intel);

   return fence;
}
/** Driver-specific fence wait implementation for the fake memory manager. */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   struct intel_context *intel = (struct intel_context *)private;

   intelWaitIrq(intel, cookie);

   return 0;
}
378 intel_init_bufmgr(struct intel_context
*intel
)
380 intelScreenPrivate
*intelScreen
= intel
->intelScreen
;
381 GLboolean ttm_disable
= getenv("INTEL_NO_TTM") != NULL
;
383 /* If we've got a new enough DDX that's initializing TTM and giving us
384 * object handles for the shared buffers, use that.
386 intel
->ttm
= GL_FALSE
;
388 intel
->intelScreen
->driScrnPriv
->ddx_version
.minor
>= 9 &&
389 intel
->intelScreen
->drmMinor
>= 11 &&
390 intel
->intelScreen
->front
.bo_handle
!= -1)
392 intel
->bufmgr
= intel_bufmgr_ttm_init(intel
->driFd
,
395 DRM_I915_FENCE_TYPE_RW
,
397 if (intel
->bufmgr
!= NULL
)
398 intel
->ttm
= GL_TRUE
;
400 /* Otherwise, use the classic buffer manager. */
401 if (intel
->bufmgr
== NULL
) {
403 fprintf(stderr
, "TTM buffer manager disabled. Using classic.\n");
405 fprintf(stderr
, "Failed to initialize TTM buffer manager. "
406 "Falling back to classic.\n");
409 if (intelScreen
->tex
.size
== 0) {
410 fprintf(stderr
, "[%s:%u] Error initializing buffer manager.\n",
415 intel
->bufmgr
= dri_bufmgr_fake_init(intelScreen
->tex
.offset
,
416 intelScreen
->tex
.map
,
417 intelScreen
->tex
.size
,
427 void intelInitDriverFunctions( struct dd_function_table
*functions
)
429 _mesa_init_driver_functions( functions
);
431 functions
->Flush
= intelFlush
;
432 functions
->Finish
= intelFinish
;
433 functions
->GetString
= intelGetString
;
434 functions
->UpdateState
= intelInvalidateState
;
435 functions
->BeginQuery
= intelBeginQuery
;
436 functions
->EndQuery
= intelEndQuery
;
438 /* CopyPixels can be accelerated even with the current memory
441 if (!getenv("INTEL_NO_BLIT")) {
442 functions
->CopyPixels
= intelCopyPixels
;
443 functions
->Bitmap
= intelBitmap
;
446 intelInitTextureFuncs( functions
);
447 intelInitStateFuncs( functions
);
448 intelInitBufferFuncs( functions
);
451 GLboolean
intelInitContext( struct intel_context
*intel
,
452 const __GLcontextModes
*mesaVis
,
453 __DRIcontextPrivate
*driContextPriv
,
454 void *sharedContextPrivate
,
455 struct dd_function_table
*functions
)
457 GLcontext
*ctx
= &intel
->ctx
;
458 GLcontext
*shareCtx
= (GLcontext
*) sharedContextPrivate
;
459 __DRIscreenPrivate
*sPriv
= driContextPriv
->driScreenPriv
;
460 intelScreenPrivate
*intelScreen
= (intelScreenPrivate
*)sPriv
->private;
461 volatile drmI830Sarea
*saPriv
= (drmI830Sarea
*)
462 (((GLubyte
*)sPriv
->pSAREA
)+intelScreen
->sarea_priv_offset
);
464 if (!_mesa_initialize_context(&intel
->ctx
,
468 _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__
);
472 driContextPriv
->driverPrivate
= intel
;
473 intel
->intelScreen
= intelScreen
;
474 intel
->driScreen
= sPriv
;
475 intel
->sarea
= saPriv
;
478 intel
->hHWContext
= driContextPriv
->hHWContext
;
479 intel
->driFd
= sPriv
->fd
;
480 intel
->driHwLock
= (drmLock
*) &sPriv
->pSAREA
->lock
;
482 intel
->maxBatchSize
= BATCH_SZ
;
484 if (!intel_init_bufmgr(intel
))
487 driParseConfigFiles (&intel
->optionCache
, &intelScreen
->optionCache
,
488 intel
->driScreen
->myNum
, "i965");
490 ctx
->Const
.MaxTextureMaxAnisotropy
= 2.0;
492 if (getenv("INTEL_STRICT_CONFORMANCE")) {
493 intel
->strict_conformance
= 1;
496 if (intel
->strict_conformance
) {
497 ctx
->Const
.MinLineWidth
= 1.0;
498 ctx
->Const
.MinLineWidthAA
= 1.0;
499 ctx
->Const
.MaxLineWidth
= 1.0;
500 ctx
->Const
.MaxLineWidthAA
= 1.0;
501 ctx
->Const
.LineWidthGranularity
= 1.0;
504 ctx
->Const
.MinLineWidth
= 1.0;
505 ctx
->Const
.MinLineWidthAA
= 1.0;
506 ctx
->Const
.MaxLineWidth
= 5.0;
507 ctx
->Const
.MaxLineWidthAA
= 5.0;
508 ctx
->Const
.LineWidthGranularity
= 0.5;
511 ctx
->Const
.MinPointSize
= 1.0;
512 ctx
->Const
.MinPointSizeAA
= 1.0;
513 ctx
->Const
.MaxPointSize
= 255.0;
514 ctx
->Const
.MaxPointSizeAA
= 3.0;
515 ctx
->Const
.PointSizeGranularity
= 1.0;
517 /* reinitialize the context point state.
518 * It depend on constants in __GLcontextRec::Const
520 _mesa_init_point(ctx
);
522 /* Initialize the software rasterizer and helper modules. */
523 _swrast_CreateContext( ctx
);
524 _vbo_CreateContext( ctx
);
525 _tnl_CreateContext( ctx
);
526 _swsetup_CreateContext( ctx
);
528 TNL_CONTEXT(ctx
)->Driver
.RunPipeline
= _tnl_run_pipeline
;
530 /* Configure swrast to match hardware characteristics: */
531 _swrast_allow_pixel_fog( ctx
, GL_FALSE
);
532 _swrast_allow_vertex_fog( ctx
, GL_TRUE
);
534 intel
->hw_stencil
= mesaVis
->stencilBits
&& mesaVis
->depthBits
== 24;
535 intel
->hw_stipple
= 1;
537 switch(mesaVis
->depthBits
) {
538 case 0: /* what to do in this case? */
540 intel
->polygon_offset_scale
= 1.0/0xffff;
543 intel
->polygon_offset_scale
= 2.0/0xffffff; /* req'd to pass glean */
550 /* Initialize swrast, tnl driver tables: */
551 intelInitSpanFuncs( ctx
);
553 if (!intel
->intelScreen
->irq_active
) {
554 _mesa_printf("IRQs not active. Exiting\n");
557 intelInitExtensions(ctx
, GL_TRUE
);
559 INTEL_DEBUG
= driParseDebugString( getenv( "INTEL_DEBUG" ),
561 if (INTEL_DEBUG
& DEBUG_BUFMGR
)
562 dri_bufmgr_set_debug(intel
->bufmgr
, GL_TRUE
);
564 intel_recreate_static_regions(intel
);
566 intel_bufferobj_init( intel
);
567 intel_fbo_init( intel
);
569 intel
->batch
= intel_batchbuffer_alloc( intel
);
570 intel
->last_swap_fence
= NULL
;
571 intel
->first_swap_fence
= NULL
;
573 if (intel
->ctx
.Mesa_DXTn
) {
574 _mesa_enable_extension( ctx
, "GL_EXT_texture_compression_s3tc" );
575 _mesa_enable_extension( ctx
, "GL_S3_s3tc" );
577 else if (driQueryOptionb (&intel
->optionCache
, "force_s3tc_enable")) {
578 _mesa_enable_extension( ctx
, "GL_EXT_texture_compression_s3tc" );
581 /* driInitTextureObjects( ctx, & intel->swapped, */
582 /* DRI_TEXMGR_DO_TEXTURE_1D | */
583 /* DRI_TEXMGR_DO_TEXTURE_2D | */
584 /* DRI_TEXMGR_DO_TEXTURE_RECT ); */
586 /* Force all software fallbacks */
587 if (getenv("INTEL_NO_RAST")) {
588 fprintf(stderr
, "disabling 3D rasterization\n");
592 /* Disable all hardware rendering (skip emitting batches and fences/waits
595 intel
->no_hw
= getenv("INTEL_NO_HW") != NULL
;
600 void intelDestroyContext(__DRIcontextPrivate
*driContextPriv
)
602 struct intel_context
*intel
= (struct intel_context
*) driContextPriv
->driverPrivate
;
604 assert(intel
); /* should never be null */
606 GLboolean release_texture_heaps
;
609 intel
->vtbl
.destroy( intel
);
611 release_texture_heaps
= (intel
->ctx
.Shared
->RefCount
== 1);
612 _swsetup_DestroyContext (&intel
->ctx
);
613 _tnl_DestroyContext (&intel
->ctx
);
614 _vbo_DestroyContext (&intel
->ctx
);
616 _swrast_DestroyContext (&intel
->ctx
);
617 intel
->Fallback
= 0; /* don't call _swrast_Flush later */
618 intel_batchbuffer_free(intel
->batch
);
621 if (intel
->last_swap_fence
) {
622 dri_fence_wait(intel
->last_swap_fence
);
623 dri_fence_unreference(intel
->last_swap_fence
);
624 intel
->last_swap_fence
= NULL
;
626 if (intel
->first_swap_fence
) {
627 dri_fence_wait(intel
->first_swap_fence
);
628 dri_fence_unreference(intel
->first_swap_fence
);
629 intel
->first_swap_fence
= NULL
;
632 if ( release_texture_heaps
) {
633 /* This share group is about to go away, free our private
634 * texture object data.
637 /* XXX: destroy the shared bufmgr struct here?
641 /* free the Mesa context */
642 intel
->ctx
.VertexProgram
.Current
= NULL
;
643 intel
->ctx
.FragmentProgram
.Current
= NULL
;
644 _mesa_destroy_context(&intel
->ctx
);
647 driContextPriv
->driverPrivate
= NULL
;
650 GLboolean
intelUnbindContext(__DRIcontextPrivate
*driContextPriv
)
655 GLboolean
intelMakeCurrent(__DRIcontextPrivate
*driContextPriv
,
656 __DRIdrawablePrivate
*driDrawPriv
,
657 __DRIdrawablePrivate
*driReadPriv
)
660 if (driContextPriv
) {
661 struct intel_context
*intel
=
662 (struct intel_context
*) driContextPriv
->driverPrivate
;
663 struct intel_framebuffer
*intel_fb
=
664 (struct intel_framebuffer
*) driDrawPriv
->driverPrivate
;
665 GLframebuffer
*readFb
= (GLframebuffer
*) driReadPriv
->driverPrivate
;
667 /* XXX FBO temporary fix-ups! */
668 /* if the renderbuffers don't have regions, init them from the context.
669 * They will be unreferenced when the renderbuffer is destroyed.
672 struct intel_renderbuffer
*irbDepth
673 = intel_get_renderbuffer(&intel_fb
->Base
, BUFFER_DEPTH
);
674 struct intel_renderbuffer
*irbStencil
675 = intel_get_renderbuffer(&intel_fb
->Base
, BUFFER_STENCIL
);
677 if (intel_fb
->color_rb
[0] && !intel_fb
->color_rb
[0]->region
) {
678 intel_region_reference(&intel_fb
->color_rb
[0]->region
,
679 intel
->front_region
);
681 if (intel_fb
->color_rb
[1] && !intel_fb
->color_rb
[1]->region
) {
682 intel_region_reference(&intel_fb
->color_rb
[1]->region
,
685 if (intel_fb
->color_rb
[2] && !intel_fb
->color_rb
[2]->region
) {
686 intel_region_reference(&intel_fb
->color_rb
[2]->region
,
687 intel
->third_region
);
689 if (irbDepth
&& !irbDepth
->region
) {
690 intel_region_reference(&irbDepth
->region
, intel
->depth_region
);
692 if (irbStencil
&& !irbStencil
->region
) {
693 intel_region_reference(&irbStencil
->region
, intel
->depth_region
);
697 /* set GLframebuffer size to match window, if needed */
698 driUpdateFramebufferSize(&intel
->ctx
, driDrawPriv
);
700 if (intel
->driReadDrawable
!= driReadPriv
) {
701 intel
->driReadDrawable
= driReadPriv
;
704 if ( intel
->driDrawable
!= driDrawPriv
) {
705 if (driDrawPriv
->swap_interval
== (unsigned)-1) {
706 driDrawPriv
->vblFlags
= (intel
->intelScreen
->irq_active
!= 0)
707 ? driGetDefaultVBlankFlags(&intel
->optionCache
)
708 : VBLANK_FLAG_NO_IRQ
;
709 driDrawableInitVBlank( driDrawPriv
);
712 intel
->driDrawable
= driDrawPriv
;
713 intelWindowMoved( intel
);
714 /* Shouldn't the readbuffer be stored also? */
717 _mesa_make_current(&intel
->ctx
,
721 intel_draw_buffer(&intel
->ctx
, &intel_fb
->Base
);
723 _mesa_make_current(NULL
, NULL
, NULL
);
730 static void intelContendedLock( struct intel_context
*intel
, GLuint flags
)
732 __DRIdrawablePrivate
*dPriv
= intel
->driDrawable
;
733 __DRIscreenPrivate
*sPriv
= intel
->driScreen
;
734 volatile drmI830Sarea
* sarea
= intel
->sarea
;
735 int me
= intel
->hHWContext
;
737 drmGetLock(intel
->driFd
, intel
->hHWContext
, flags
);
739 /* If the window moved, may need to set a new cliprect now.
741 * NOTE: This releases and regains the hw lock, so all state
742 * checking must be done *after* this call:
745 DRI_VALIDATE_DRAWABLE_INFO(sPriv
, dPriv
);
749 intel
->need_flush
= 1;
753 if (sarea
->ctxOwner
!= me
) {
754 if (INTEL_DEBUG
& DEBUG_BUFMGR
) {
755 fprintf(stderr
, "Lost Context: sarea->ctxOwner %x me %x\n",
756 sarea
->ctxOwner
, me
);
758 sarea
->ctxOwner
= me
;
759 intel
->vtbl
.lost_hardware( intel
);
762 /* If the last consumer of the texture memory wasn't us, notify the fake
763 * bufmgr and record the new owner. We should have the memory shared
764 * between contexts of a single fake bufmgr, but this will at least make
765 * things correct for now.
767 if (!intel
->ttm
&& sarea
->texAge
!= intel
->hHWContext
) {
768 sarea
->texAge
= intel
->hHWContext
;
769 dri_bufmgr_fake_contended_lock_take(intel
->bufmgr
);
770 if (INTEL_DEBUG
& DEBUG_BATCH
)
771 intel_decode_context_reset();
772 if (INTEL_DEBUG
& DEBUG_BUFMGR
) {
773 fprintf(stderr
, "Lost Textures: sarea->texAge %x hw context %x\n",
774 sarea
->ctxOwner
, intel
->hHWContext
);
780 if (dPriv
&& intel
->lastStamp
!= dPriv
->lastStamp
) {
781 intelWindowMoved( intel
);
782 intel
->lastStamp
= dPriv
->lastStamp
;
/* Serializes LOCK_HARDWARE()/UNLOCK_HARDWARE() across threads. */
_glthread_DECLARE_STATIC_MUTEX(lockMutex);
788 /* Lock the hardware and validate our state.
790 void LOCK_HARDWARE( struct intel_context
*intel
)
794 _glthread_LOCK_MUTEX(lockMutex
);
795 assert(!intel
->locked
);
798 DRM_CAS(intel
->driHwLock
, intel
->hHWContext
,
799 (DRM_LOCK_HELD
|intel
->hHWContext
), __ret
);
801 intelContendedLock( intel
, 0 );
808 /* Unlock the hardware using the global current context
810 void UNLOCK_HARDWARE( struct intel_context
*intel
)
812 intel
->vtbl
.note_unlock( intel
);
815 DRM_UNLOCK(intel
->driFd
, intel
->driHwLock
, intel
->hHWContext
);
816 _glthread_UNLOCK_MUTEX(lockMutex
);