1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
43 #include "tnl/t_pipeline.h"
44 #include "tnl/t_vertex.h"
46 #include "drivers/common/driverfuncs.h"
48 #include "intel_screen.h"
49 #include "intel_chipset.h"
52 #include "i830_common.h"
54 #include "intel_tex.h"
55 #include "intel_span.h"
56 #include "intel_ioctl.h"
57 #include "intel_batchbuffer.h"
58 #include "intel_blit.h"
59 #include "intel_regions.h"
60 #include "intel_buffer_objects.h"
61 #include "intel_decode.h"
62 #include "intel_bufmgr_ttm.h"
/* Bitmask of enabled debug-output channels (DEBUG_* flags).  Parsed from
 * the INTEL_DEBUG environment variable against debug_control[] during
 * context initialization; 0 means all debug output disabled. */
int INTEL_DEBUG = (0);
72 #define need_GL_NV_point_sprite
73 #define need_GL_ARB_multisample
74 #define need_GL_ARB_point_parameters
75 #define need_GL_ARB_texture_compression
76 #define need_GL_ARB_vertex_buffer_object
77 #define need_GL_ARB_vertex_program
78 #define need_GL_ARB_window_pos
79 #define need_GL_ARB_occlusion_query
80 #define need_GL_EXT_blend_color
81 #define need_GL_EXT_blend_equation_separate
82 #define need_GL_EXT_blend_func_separate
83 #define need_GL_EXT_blend_minmax
84 #define need_GL_EXT_cull_vertex
85 #define need_GL_EXT_fog_coord
86 #define need_GL_EXT_multi_draw_arrays
87 #define need_GL_EXT_secondary_color
88 #define need_GL_EXT_point_parameters
89 #define need_GL_VERSION_2_0
90 #define need_GL_VERSION_2_1
91 #define need_GL_ARB_shader_objects
92 #define need_GL_ARB_vertex_shader
94 #include "extension_helper.h"
100 /***************************************
101 * Mesa's Driver Functions
102 ***************************************/
104 #define DRIVER_VERSION "4.1.3002"
106 static const GLubyte
*intelGetString( GLcontext
*ctx
, GLenum name
)
108 const char * chipset
;
109 static char buffer
[128];
113 return (GLubyte
*)"Tungsten Graphics, Inc";
117 switch (intel_context(ctx
)->intelScreen
->deviceID
) {
118 case PCI_CHIP_I965_Q
:
119 chipset
= "Intel(R) 965Q";
121 case PCI_CHIP_I965_G
:
122 case PCI_CHIP_I965_G_1
:
123 chipset
= "Intel(R) 965G";
125 case PCI_CHIP_I946_GZ
:
126 chipset
= "Intel(R) 946GZ";
128 case PCI_CHIP_I965_GM
:
129 chipset
= "Intel(R) 965GM";
131 case PCI_CHIP_I965_GME
:
132 chipset
= "Intel(R) 965GME/GLE";
135 chipset
= "Unknown Intel Chipset";
138 (void) driGetRendererString( buffer
, chipset
, DRIVER_VERSION
, 0 );
139 return (GLubyte
*) buffer
;
148 * Extension strings exported by the intel driver.
151 * It appears that ARB_texture_env_crossbar has "disappeared" compared to the
152 * old i830-specific driver.
154 const struct dri_extension card_extensions
[] =
156 { "GL_ARB_multisample", GL_ARB_multisample_functions
},
157 { "GL_ARB_multitexture", NULL
},
158 { "GL_ARB_point_parameters", GL_ARB_point_parameters_functions
},
159 { "GL_NV_point_sprite", GL_NV_point_sprite_functions
},
160 { "GL_ARB_texture_border_clamp", NULL
},
161 { "GL_ARB_texture_compression", GL_ARB_texture_compression_functions
},
162 { "GL_ARB_texture_cube_map", NULL
},
163 { "GL_ARB_texture_env_add", NULL
},
164 { "GL_ARB_texture_env_combine", NULL
},
165 { "GL_ARB_texture_env_dot3", NULL
},
166 { "GL_ARB_texture_mirrored_repeat", NULL
},
167 { "GL_ARB_texture_non_power_of_two", NULL
},
168 { "GL_ARB_texture_rectangle", NULL
},
169 { "GL_NV_texture_rectangle", NULL
},
170 { "GL_EXT_texture_rectangle", NULL
},
171 { "GL_ARB_texture_rectangle", NULL
},
172 { "GL_ARB_point_sprite", NULL
},
173 { "GL_ARB_point_parameters", NULL
},
174 { "GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions
},
175 { "GL_ARB_vertex_program", GL_ARB_vertex_program_functions
},
176 { "GL_ARB_window_pos", GL_ARB_window_pos_functions
},
177 { "GL_EXT_blend_color", GL_EXT_blend_color_functions
},
178 { "GL_EXT_blend_equation_separate", GL_EXT_blend_equation_separate_functions
},
179 { "GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions
},
180 { "GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions
},
181 { "GL_EXT_blend_logic_op", NULL
},
182 { "GL_EXT_blend_subtract", NULL
},
183 { "GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions
},
184 { "GL_EXT_fog_coord", GL_EXT_fog_coord_functions
},
185 { "GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions
},
186 { "GL_EXT_secondary_color", GL_EXT_secondary_color_functions
},
187 { "GL_EXT_stencil_wrap", NULL
},
188 { "GL_EXT_texture_edge_clamp", NULL
},
189 { "GL_EXT_texture_env_combine", NULL
},
190 { "GL_EXT_texture_env_dot3", NULL
},
191 { "GL_EXT_texture_filter_anisotropic", NULL
},
192 { "GL_EXT_texture_lod_bias", NULL
},
193 { "GL_EXT_texture_sRGB", NULL
},
194 { "GL_3DFX_texture_compression_FXT1", NULL
},
195 { "GL_APPLE_client_storage", NULL
},
196 { "GL_MESA_pack_invert", NULL
},
197 { "GL_MESA_ycbcr_texture", NULL
},
198 { "GL_NV_blend_square", NULL
},
199 { "GL_SGIS_generate_mipmap", NULL
},
200 { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions
},
201 { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions
},
202 { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions
},
203 { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions
},
204 { "GL_ARB_fragment_shader", NULL
},
205 /* XXX not implement yet, to compile builtin glsl lib */
206 { "GL_ARB_draw_buffers", NULL
},
210 const struct dri_extension ttm_extensions
[] = {
211 {"GL_ARB_pixel_buffer_object", NULL
},
215 const struct dri_extension arb_oc_extension
=
216 { "GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions
};
219 * Initializes potential list of extensions if ctx == NULL, or actually enables
220 * extensions for a context.
222 void intelInitExtensions(GLcontext
*ctx
, GLboolean enable_imaging
)
224 struct intel_context
*intel
= ctx
?intel_context(ctx
):NULL
;
226 /* Disable imaging extension until convolution is working in teximage paths.
228 enable_imaging
= GL_FALSE
;
230 driInitExtensions(ctx
, card_extensions
, enable_imaging
);
232 if (intel
== NULL
|| intel
->ttm
)
233 driInitExtensions(ctx
, ttm_extensions
, GL_FALSE
);
235 if (intel
== NULL
|| intel
->intelScreen
->drmMinor
>= 8)
236 driInitSingleExtension(ctx
, &arb_oc_extension
);
239 static const struct dri_debug_control debug_control
[] =
241 { "fall", DEBUG_FALLBACKS
},
242 { "tex", DEBUG_TEXTURE
},
243 { "ioctl", DEBUG_IOCTL
},
244 { "prim", DEBUG_PRIMS
},
245 { "vert", DEBUG_VERTS
},
246 { "state", DEBUG_STATE
},
247 { "verb", DEBUG_VERBOSE
},
248 { "dri", DEBUG_DRI
},
249 { "dma", DEBUG_DMA
},
250 { "san", DEBUG_SANITY
},
251 { "sync", DEBUG_SYNC
},
252 { "sleep", DEBUG_SLEEP
},
253 { "pix", DEBUG_PIXEL
},
254 { "buf", DEBUG_BUFMGR
},
255 { "stats", DEBUG_STATS
},
256 { "tile", DEBUG_TILE
},
257 { "sing", DEBUG_SINGLE_THREAD
},
258 { "thre", DEBUG_SINGLE_THREAD
},
261 { "bat", DEBUG_BATCH
},
262 { "blit", DEBUG_BLIT
},
263 { "mip", DEBUG_MIPTREE
},
264 { "reg", DEBUG_REGION
},
269 static void intelInvalidateState( GLcontext
*ctx
, GLuint new_state
)
271 struct intel_context
*intel
= intel_context(ctx
);
273 _swrast_InvalidateState( ctx
, new_state
);
274 _swsetup_InvalidateState( ctx
, new_state
);
275 _vbo_InvalidateState( ctx
, new_state
);
276 _tnl_InvalidateState( ctx
, new_state
);
277 _tnl_invalidate_vertex_state( ctx
, new_state
);
279 intel
->NewGLState
|= new_state
;
281 if (intel
->vtbl
.invalidate_state
)
282 intel
->vtbl
.invalidate_state( intel
, new_state
);
286 void intelFlush( GLcontext
*ctx
)
288 struct intel_context
*intel
= intel_context( ctx
);
290 if (intel
->batch
->map
!= intel
->batch
->ptr
)
291 intel_batchbuffer_flush(intel
->batch
);
294 void intelFinish( GLcontext
*ctx
)
296 struct intel_context
*intel
= intel_context( ctx
);
299 if (intel
->batch
->last_fence
) {
300 dri_fence_wait(intel
->batch
->last_fence
);
301 dri_fence_unreference(intel
->batch
->last_fence
);
302 intel
->batch
->last_fence
= NULL
;
307 intelBeginQuery(GLcontext
*ctx
, GLenum target
, struct gl_query_object
*q
)
309 struct intel_context
*intel
= intel_context( ctx
);
311 .read_write
= MMIO_READ
,
312 .reg
= MMIO_REGS_PS_DEPTH_COUNT
,
316 intelFinish(&intel
->ctx
);
317 drmCommandWrite(intel
->driFd
, DRM_I830_MMIO
, &io
, sizeof(io
));
321 intelEndQuery(GLcontext
*ctx
, GLenum target
, struct gl_query_object
*q
)
323 struct intel_context
*intel
= intel_context( ctx
);
326 .read_write
= MMIO_READ
,
327 .reg
= MMIO_REGS_PS_DEPTH_COUNT
,
330 intelFinish(&intel
->ctx
);
331 drmCommandWrite(intel
->driFd
, DRM_I830_MMIO
, &io
, sizeof(io
));
332 q
->Result
= tmp
- q
->Result
;
/** Driver-specific fence emit implementation for the fake memory manager. */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *)private;
   unsigned int fence;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */

   fence = intelEmitIrqLocked(intel);

   return fence;
}
/** Driver-specific fence wait implementation for the fake memory manager. */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   struct intel_context *intel = (struct intel_context *)private;

   intelWaitIrq(intel, cookie);

   return 0;
}
365 intel_init_bufmgr(struct intel_context
*intel
)
367 intelScreenPrivate
*intelScreen
= intel
->intelScreen
;
368 GLboolean ttm_disable
= getenv("INTEL_NO_TTM") != NULL
;
370 /* If we've got a new enough DDX that's initializing TTM and giving us
371 * object handles for the shared buffers, use that.
373 intel
->ttm
= GL_FALSE
;
375 intel
->intelScreen
->driScrnPriv
->ddx_version
.minor
>= 9 &&
376 intel
->intelScreen
->drmMinor
>= 11 &&
377 intel
->intelScreen
->front
.bo_handle
!= -1)
379 intel
->bufmgr
= intel_bufmgr_ttm_init(intel
->driFd
,
382 DRM_I915_FENCE_TYPE_RW
,
384 if (intel
->bufmgr
!= NULL
)
385 intel
->ttm
= GL_TRUE
;
387 /* Otherwise, use the classic buffer manager. */
388 if (intel
->bufmgr
== NULL
) {
390 fprintf(stderr
, "TTM buffer manager disabled. Using classic.\n");
392 fprintf(stderr
, "Failed to initialize TTM buffer manager. "
393 "Falling back to classic.\n");
396 if (intelScreen
->tex
.size
== 0) {
397 fprintf(stderr
, "[%s:%u] Error initializing buffer manager.\n",
402 intel
->bufmgr
= dri_bufmgr_fake_init(intelScreen
->tex
.offset
,
403 intelScreen
->tex
.map
,
404 intelScreen
->tex
.size
,
414 void intelInitDriverFunctions( struct dd_function_table
*functions
)
416 _mesa_init_driver_functions( functions
);
418 functions
->Flush
= intelFlush
;
419 functions
->Finish
= intelFinish
;
420 functions
->GetString
= intelGetString
;
421 functions
->UpdateState
= intelInvalidateState
;
422 functions
->BeginQuery
= intelBeginQuery
;
423 functions
->EndQuery
= intelEndQuery
;
425 /* CopyPixels can be accelerated even with the current memory
428 if (!getenv("INTEL_NO_BLIT")) {
429 functions
->CopyPixels
= intelCopyPixels
;
430 functions
->Bitmap
= intelBitmap
;
433 intelInitTextureFuncs( functions
);
434 intelInitStateFuncs( functions
);
435 intelInitBufferFuncs( functions
);
438 GLboolean
intelInitContext( struct intel_context
*intel
,
439 const __GLcontextModes
*mesaVis
,
440 __DRIcontextPrivate
*driContextPriv
,
441 void *sharedContextPrivate
,
442 struct dd_function_table
*functions
)
444 GLcontext
*ctx
= &intel
->ctx
;
445 GLcontext
*shareCtx
= (GLcontext
*) sharedContextPrivate
;
446 __DRIscreenPrivate
*sPriv
= driContextPriv
->driScreenPriv
;
447 intelScreenPrivate
*intelScreen
= (intelScreenPrivate
*)sPriv
->private;
448 volatile drmI830Sarea
*saPriv
= (drmI830Sarea
*)
449 (((GLubyte
*)sPriv
->pSAREA
)+intelScreen
->sarea_priv_offset
);
451 if (!_mesa_initialize_context(&intel
->ctx
,
455 _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__
);
459 driContextPriv
->driverPrivate
= intel
;
460 intel
->intelScreen
= intelScreen
;
461 intel
->driScreen
= sPriv
;
462 intel
->sarea
= saPriv
;
465 intel
->hHWContext
= driContextPriv
->hHWContext
;
466 intel
->driFd
= sPriv
->fd
;
467 intel
->driHwLock
= (drmLock
*) &sPriv
->pSAREA
->lock
;
469 intel
->maxBatchSize
= BATCH_SZ
;
471 if (!intel_init_bufmgr(intel
))
474 driParseConfigFiles (&intel
->optionCache
, &intelScreen
->optionCache
,
475 intel
->driScreen
->myNum
, "i965");
477 ctx
->Const
.MaxTextureMaxAnisotropy
= 2.0;
479 if (getenv("INTEL_STRICT_CONFORMANCE")) {
480 intel
->strict_conformance
= 1;
483 if (intel
->strict_conformance
) {
484 ctx
->Const
.MinLineWidth
= 1.0;
485 ctx
->Const
.MinLineWidthAA
= 1.0;
486 ctx
->Const
.MaxLineWidth
= 1.0;
487 ctx
->Const
.MaxLineWidthAA
= 1.0;
488 ctx
->Const
.LineWidthGranularity
= 1.0;
491 ctx
->Const
.MinLineWidth
= 1.0;
492 ctx
->Const
.MinLineWidthAA
= 1.0;
493 ctx
->Const
.MaxLineWidth
= 5.0;
494 ctx
->Const
.MaxLineWidthAA
= 5.0;
495 ctx
->Const
.LineWidthGranularity
= 0.5;
498 ctx
->Const
.MinPointSize
= 1.0;
499 ctx
->Const
.MinPointSizeAA
= 1.0;
500 ctx
->Const
.MaxPointSize
= 255.0;
501 ctx
->Const
.MaxPointSizeAA
= 3.0;
502 ctx
->Const
.PointSizeGranularity
= 1.0;
504 /* reinitialize the context point state.
505 * It depend on constants in __GLcontextRec::Const
507 _mesa_init_point(ctx
);
509 /* Initialize the software rasterizer and helper modules. */
510 _swrast_CreateContext( ctx
);
511 _vbo_CreateContext( ctx
);
512 _tnl_CreateContext( ctx
);
513 _swsetup_CreateContext( ctx
);
515 TNL_CONTEXT(ctx
)->Driver
.RunPipeline
= _tnl_run_pipeline
;
517 /* Configure swrast to match hardware characteristics: */
518 _swrast_allow_pixel_fog( ctx
, GL_FALSE
);
519 _swrast_allow_vertex_fog( ctx
, GL_TRUE
);
521 intel
->hw_stencil
= mesaVis
->stencilBits
&& mesaVis
->depthBits
== 24;
522 intel
->hw_stipple
= 1;
524 switch(mesaVis
->depthBits
) {
525 case 0: /* what to do in this case? */
527 intel
->polygon_offset_scale
= 1.0/0xffff;
530 intel
->polygon_offset_scale
= 2.0/0xffffff; /* req'd to pass glean */
537 /* Initialize swrast, tnl driver tables: */
538 intelInitSpanFuncs( ctx
);
540 if (!intel
->intelScreen
->irq_active
) {
541 _mesa_printf("IRQs not active. Exiting\n");
544 intelInitExtensions(ctx
, GL_TRUE
);
546 INTEL_DEBUG
= driParseDebugString( getenv( "INTEL_DEBUG" ),
548 if (INTEL_DEBUG
& DEBUG_BUFMGR
)
549 dri_bufmgr_set_debug(intel
->bufmgr
, GL_TRUE
);
551 intel_recreate_static_regions(intel
);
553 intel_bufferobj_init( intel
);
554 intel
->batch
= intel_batchbuffer_alloc( intel
);
555 intel
->last_swap_fence
= NULL
;
556 intel
->first_swap_fence
= NULL
;
558 if (intel
->ctx
.Mesa_DXTn
) {
559 _mesa_enable_extension( ctx
, "GL_EXT_texture_compression_s3tc" );
560 _mesa_enable_extension( ctx
, "GL_S3_s3tc" );
562 else if (driQueryOptionb (&intel
->optionCache
, "force_s3tc_enable")) {
563 _mesa_enable_extension( ctx
, "GL_EXT_texture_compression_s3tc" );
566 /* driInitTextureObjects( ctx, & intel->swapped, */
567 /* DRI_TEXMGR_DO_TEXTURE_1D | */
568 /* DRI_TEXMGR_DO_TEXTURE_2D | */
569 /* DRI_TEXMGR_DO_TEXTURE_RECT ); */
571 /* Force all software fallbacks */
572 if (getenv("INTEL_NO_RAST")) {
573 fprintf(stderr
, "disabling 3D rasterization\n");
577 /* Disable all hardware rendering (skip emitting batches and fences/waits
580 intel
->no_hw
= getenv("INTEL_NO_HW") != NULL
;
585 void intelDestroyContext(__DRIcontextPrivate
*driContextPriv
)
587 struct intel_context
*intel
= (struct intel_context
*) driContextPriv
->driverPrivate
;
589 assert(intel
); /* should never be null */
591 GLboolean release_texture_heaps
;
594 intel
->vtbl
.destroy( intel
);
596 release_texture_heaps
= (intel
->ctx
.Shared
->RefCount
== 1);
597 _swsetup_DestroyContext (&intel
->ctx
);
598 _tnl_DestroyContext (&intel
->ctx
);
599 _vbo_DestroyContext (&intel
->ctx
);
601 _swrast_DestroyContext (&intel
->ctx
);
602 intel
->Fallback
= 0; /* don't call _swrast_Flush later */
603 intel_batchbuffer_free(intel
->batch
);
606 if (intel
->last_swap_fence
) {
607 dri_fence_wait(intel
->last_swap_fence
);
608 dri_fence_unreference(intel
->last_swap_fence
);
609 intel
->last_swap_fence
= NULL
;
611 if (intel
->first_swap_fence
) {
612 dri_fence_wait(intel
->first_swap_fence
);
613 dri_fence_unreference(intel
->first_swap_fence
);
614 intel
->first_swap_fence
= NULL
;
617 if ( release_texture_heaps
) {
618 /* This share group is about to go away, free our private
619 * texture object data.
622 /* XXX: destroy the shared bufmgr struct here?
626 /* Free the regions created to describe front/back/depth
630 intel_region_release(&intel
->front_region
);
631 intel_region_release(&intel
->back_region
);
632 intel_region_release(&intel
->depth_region
);
633 intel_region_release(&intel
->draw_region
);
636 /* free the Mesa context */
637 intel
->ctx
.VertexProgram
.Current
= NULL
;
638 intel
->ctx
.FragmentProgram
.Current
= NULL
;
639 _mesa_destroy_context(&intel
->ctx
);
642 driContextPriv
->driverPrivate
= NULL
;
645 GLboolean
intelUnbindContext(__DRIcontextPrivate
*driContextPriv
)
650 GLboolean
intelMakeCurrent(__DRIcontextPrivate
*driContextPriv
,
651 __DRIdrawablePrivate
*driDrawPriv
,
652 __DRIdrawablePrivate
*driReadPriv
)
655 if (driContextPriv
) {
656 struct intel_context
*intel
= (struct intel_context
*) driContextPriv
->driverPrivate
;
658 if (intel
->driReadDrawable
!= driReadPriv
) {
659 intel
->driReadDrawable
= driReadPriv
;
662 if ( intel
->driDrawable
!= driDrawPriv
) {
663 if (driDrawPriv
->swap_interval
== (unsigned)-1) {
664 driDrawPriv
->vblFlags
= (intel
->intelScreen
->irq_active
!= 0)
665 ? driGetDefaultVBlankFlags(&intel
->optionCache
)
666 : VBLANK_FLAG_NO_IRQ
;
667 driDrawableInitVBlank( driDrawPriv
);
670 intel
->driDrawable
= driDrawPriv
;
671 intelWindowMoved( intel
);
672 /* Shouldn't the readbuffer be stored also? */
675 _mesa_make_current(&intel
->ctx
,
676 (GLframebuffer
*) driDrawPriv
->driverPrivate
,
677 (GLframebuffer
*) driReadPriv
->driverPrivate
);
679 intel
->ctx
.Driver
.DrawBuffer( &intel
->ctx
, intel
->ctx
.Color
.DrawBuffer
[0] );
681 _mesa_make_current(NULL
, NULL
, NULL
);
688 static void intelContendedLock( struct intel_context
*intel
, GLuint flags
)
690 __DRIdrawablePrivate
*dPriv
= intel
->driDrawable
;
691 __DRIscreenPrivate
*sPriv
= intel
->driScreen
;
692 volatile drmI830Sarea
* sarea
= intel
->sarea
;
693 int me
= intel
->hHWContext
;
695 drmGetLock(intel
->driFd
, intel
->hHWContext
, flags
);
697 /* If the window moved, may need to set a new cliprect now.
699 * NOTE: This releases and regains the hw lock, so all state
700 * checking must be done *after* this call:
703 DRI_VALIDATE_DRAWABLE_INFO(sPriv
, dPriv
);
707 intel
->need_flush
= 1;
711 if (sarea
->ctxOwner
!= me
) {
712 if (INTEL_DEBUG
& DEBUG_BUFMGR
) {
713 fprintf(stderr
, "Lost Context: sarea->ctxOwner %x me %x\n",
714 sarea
->ctxOwner
, me
);
716 sarea
->ctxOwner
= me
;
717 intel
->vtbl
.lost_hardware( intel
);
720 /* If the last consumer of the texture memory wasn't us, notify the fake
721 * bufmgr and record the new owner. We should have the memory shared
722 * between contexts of a single fake bufmgr, but this will at least make
723 * things correct for now.
725 if (!intel
->ttm
&& sarea
->texAge
!= intel
->hHWContext
) {
726 sarea
->texAge
= intel
->hHWContext
;
727 dri_bufmgr_fake_contended_lock_take(intel
->bufmgr
);
728 if (INTEL_DEBUG
& DEBUG_BATCH
)
729 intel_decode_context_reset();
730 if (INTEL_DEBUG
& DEBUG_BUFMGR
) {
731 fprintf(stderr
, "Lost Textures: sarea->texAge %x hw context %x\n",
732 sarea
->ctxOwner
, intel
->hHWContext
);
738 if (dPriv
&& intel
->lastStamp
!= dPriv
->lastStamp
) {
739 intelWindowMoved( intel
);
740 intel
->lastStamp
= dPriv
->lastStamp
;
/* Process-wide mutex serializing LOCK_HARDWARE/UNLOCK_HARDWARE between
 * threads before the DRM hardware lock is taken. */
_glthread_DECLARE_STATIC_MUTEX(lockMutex);
746 /* Lock the hardware and validate our state.
748 void LOCK_HARDWARE( struct intel_context
*intel
)
752 _glthread_LOCK_MUTEX(lockMutex
);
753 assert(!intel
->locked
);
756 DRM_CAS(intel
->driHwLock
, intel
->hHWContext
,
757 (DRM_LOCK_HELD
|intel
->hHWContext
), __ret
);
759 intelContendedLock( intel
, 0 );
766 /* Unlock the hardware using the global current context
768 void UNLOCK_HARDWARE( struct intel_context
*intel
)
770 intel
->vtbl
.note_unlock( intel
);
773 DRM_UNLOCK(intel
->driFd
, intel
->driHwLock
, intel
->hHWContext
);
774 _glthread_UNLOCK_MUTEX(lockMutex
);