/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "simple_list.h"
#include "extensions.h"
#include "framebuffer.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/t_pipeline.h"
#include "tnl/t_vertex.h"

#include "drivers/common/driverfuncs.h"

#include "intel_screen.h"
#include "intel_chipset.h"

#include "i830_common.h"

#include "intel_tex.h"
#include "intel_span.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_regions.h"
#include "intel_buffers.h"
#include "intel_buffer_objects.h"
#include "intel_decode.h"
#include "intel_fbo.h"
#include "intel_bufmgr_ttm.h"

#include "drirenderbuffer.h"
int INTEL_DEBUG = (0);
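
/* INTEL_DEBUG is a bitmask of the DEBUG_* flags.  It is filled in from the
 * INTEL_DEBUG environment variable at context creation (see debug_control[]
 * and the driParseDebugString() call in intelInitContext() below).
 */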
#define need_GL_NV_point_sprite
#define need_GL_ARB_multisample
#define need_GL_ARB_point_parameters
#define need_GL_ARB_texture_compression
#define need_GL_ARB_vertex_buffer_object
#define need_GL_ARB_vertex_program
#define need_GL_ARB_window_pos
#define need_GL_ARB_occlusion_query
#define need_GL_EXT_blend_color
#define need_GL_EXT_blend_equation_separate
#define need_GL_EXT_blend_func_separate
#define need_GL_EXT_blend_minmax
#define need_GL_EXT_cull_vertex
#define need_GL_EXT_fog_coord
#define need_GL_EXT_framebuffer_object
#define need_GL_EXT_multi_draw_arrays
#define need_GL_EXT_secondary_color
#define need_GL_ATI_separate_stencil
#define need_GL_EXT_point_parameters
#define need_GL_VERSION_2_0
#define need_GL_VERSION_2_1
#define need_GL_ARB_shader_objects
#define need_GL_ARB_vertex_shader

#include "extension_helper.h"
/***************************************
 * Mesa's Driver Functions
 ***************************************/

#define DRIVER_VERSION "4.1.3002"
static const GLubyte *intelGetString( GLcontext *ctx, GLenum name )
{
   const char * chipset;
   static char buffer[128];

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *)"Tungsten Graphics, Inc";

   case GL_RENDERER:
      switch (intel_context(ctx)->intelScreen->deviceID) {
      case PCI_CHIP_I965_Q:
         chipset = "Intel(R) 965Q";
         break;
      case PCI_CHIP_I965_G:
      case PCI_CHIP_I965_G_1:
         chipset = "Intel(R) 965G";
         break;
      case PCI_CHIP_I946_GZ:
         chipset = "Intel(R) 946GZ";
         break;
      case PCI_CHIP_I965_GM:
         chipset = "Intel(R) 965GM";
         break;
      case PCI_CHIP_I965_GME:
         chipset = "Intel(R) 965GME/GLE";
         break;
      case PCI_CHIP_IGD_GM:
         chipset = "Intel(R) Integrated Graphics Device";
         break;
      default:
         chipset = "Unknown Intel Chipset";
         break;
      }

      (void) driGetRendererString( buffer, chipset, DRIVER_VERSION, 0 );
      return (GLubyte *) buffer;

   default:
      return NULL;
   }
}
/**
 * Extension strings exported by the intel driver.
 *
 * It appears that ARB_texture_env_crossbar has "disappeared" compared to the
 * old i830-specific driver.
 */
const struct dri_extension card_extensions[] =
{
   { "GL_ARB_multisample",                GL_ARB_multisample_functions },
   { "GL_ARB_multitexture",               NULL },
   { "GL_ARB_point_parameters",           GL_ARB_point_parameters_functions },
   { "GL_NV_point_sprite",                GL_NV_point_sprite_functions },
   { "GL_ARB_texture_border_clamp",       NULL },
   { "GL_ARB_texture_compression",        GL_ARB_texture_compression_functions },
   { "GL_ARB_texture_cube_map",           NULL },
   { "GL_ARB_texture_env_add",            NULL },
   { "GL_ARB_texture_env_combine",        NULL },
   { "GL_ARB_texture_env_dot3",           NULL },
   { "GL_ARB_texture_mirrored_repeat",    NULL },
   { "GL_ARB_texture_non_power_of_two",   NULL },
   { "GL_ARB_texture_rectangle",          NULL },
   { "GL_NV_texture_rectangle",           NULL },
   { "GL_EXT_texture_rectangle",          NULL },
   { "GL_ARB_texture_rectangle",          NULL },
   { "GL_ARB_point_sprite",               NULL },
   { "GL_ARB_point_parameters",           NULL },
   { "GL_ARB_vertex_buffer_object",       GL_ARB_vertex_buffer_object_functions },
   { "GL_ARB_vertex_program",             GL_ARB_vertex_program_functions },
   { "GL_ARB_window_pos",                 GL_ARB_window_pos_functions },
   { "GL_EXT_blend_color",                GL_EXT_blend_color_functions },
   { "GL_EXT_blend_equation_separate",    GL_EXT_blend_equation_separate_functions },
   { "GL_EXT_blend_func_separate",        GL_EXT_blend_func_separate_functions },
   { "GL_EXT_blend_minmax",               GL_EXT_blend_minmax_functions },
   { "GL_EXT_blend_logic_op",             NULL },
   { "GL_EXT_blend_subtract",             NULL },
   { "GL_EXT_cull_vertex",                GL_EXT_cull_vertex_functions },
   { "GL_EXT_fog_coord",                  GL_EXT_fog_coord_functions },
   { "GL_EXT_multi_draw_arrays",          GL_EXT_multi_draw_arrays_functions },
   { "GL_EXT_secondary_color",            GL_EXT_secondary_color_functions },
   { "GL_ATI_separate_stencil",           GL_ATI_separate_stencil_functions },
   { "GL_EXT_stencil_wrap",               NULL },
   /* Do not enable this extension.  It conflicts with GL_ATI_separate_stencil
    * and 2.0's separate stencil, because mesa's computed _TestTwoSide will
    * only reflect whether it's enabled through this extension, even if the
    * application is using the other interfaces.
    */
   /*{ "GL_EXT_stencil_two_side",           GL_EXT_stencil_two_side_functions },*/
   { "GL_EXT_texture_edge_clamp",         NULL },
   { "GL_EXT_texture_env_combine",        NULL },
   { "GL_EXT_texture_env_dot3",           NULL },
   { "GL_EXT_texture_filter_anisotropic", NULL },
   { "GL_EXT_texture_lod_bias",           NULL },
   { "GL_EXT_texture_sRGB",               NULL },
   { "GL_3DFX_texture_compression_FXT1",  NULL },
   { "GL_APPLE_client_storage",           NULL },
   { "GL_MESA_pack_invert",               NULL },
   { "GL_MESA_ycbcr_texture",             NULL },
   { "GL_NV_blend_square",                NULL },
   { "GL_SGIS_generate_mipmap",           NULL },
   { "GL_ARB_shading_language_100",       GL_VERSION_2_0_functions },
   { "GL_ARB_shading_language_120",       GL_VERSION_2_1_functions },
   { "GL_ARB_shader_objects",             GL_ARB_shader_objects_functions },
   { "GL_ARB_vertex_shader",              GL_ARB_vertex_shader_functions },
   { "GL_ARB_fragment_shader",            NULL },
   { "GL_ARB_draw_buffers",               NULL },
   { NULL,                                NULL }
};
const struct dri_extension ttm_extensions[] = {
   { "GL_EXT_framebuffer_object",  GL_EXT_framebuffer_object_functions },
   { "GL_ARB_pixel_buffer_object", NULL },
   { NULL,                         NULL }
};

const struct dri_extension arb_oc_extension =
   { "GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions };
/**
 * Initializes potential list of extensions if ctx == NULL, or actually enables
 * extensions for a context.
 */
void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
{
   struct intel_context *intel = ctx ? intel_context(ctx) : NULL;

   /* Disable imaging extension until convolution is working in teximage paths.
    */
   enable_imaging = GL_FALSE;

   driInitExtensions(ctx, card_extensions, enable_imaging);

   if (intel == NULL || intel->ttm)
      driInitExtensions(ctx, ttm_extensions, GL_FALSE);

   if (intel == NULL || intel->intelScreen->drmMinor >= 8)
      driInitSingleExtension(ctx, &arb_oc_extension);
}
static const struct dri_debug_control debug_control[] =
{
   { "fall",  DEBUG_FALLBACKS },
   { "tex",   DEBUG_TEXTURE },
   { "ioctl", DEBUG_IOCTL },
   { "prim",  DEBUG_PRIMS },
   { "vert",  DEBUG_VERTS },
   { "state", DEBUG_STATE },
   { "verb",  DEBUG_VERBOSE },
   { "dri",   DEBUG_DRI },
   { "dma",   DEBUG_DMA },
   { "san",   DEBUG_SANITY },
   { "sync",  DEBUG_SYNC },
   { "sleep", DEBUG_SLEEP },
   { "pix",   DEBUG_PIXEL },
   { "buf",   DEBUG_BUFMGR },
   { "stats", DEBUG_STATS },
   { "tile",  DEBUG_TILE },
   { "sing",  DEBUG_SINGLE_THREAD },
   { "thre",  DEBUG_SINGLE_THREAD },
   { "bat",   DEBUG_BATCH },
   { "blit",  DEBUG_BLIT },
   { "mip",   DEBUG_MIPTREE },
   { "reg",   DEBUG_REGION },
   { "fbo",   DEBUG_FBO },
   { NULL,    0 }
};
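
/* driParseDebugString() ORs together the flag of every token above that
 * occurs in the INTEL_DEBUG environment variable, so e.g.
 * INTEL_DEBUG="bat,buf" enables batchbuffer and buffer-manager debug output.
 */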
static void intelInvalidateState( GLcontext *ctx, GLuint new_state )
{
   struct intel_context *intel = intel_context(ctx);

   _swrast_InvalidateState( ctx, new_state );
   _swsetup_InvalidateState( ctx, new_state );
   _vbo_InvalidateState( ctx, new_state );
   _tnl_InvalidateState( ctx, new_state );
   _tnl_invalidate_vertex_state( ctx, new_state );

   intel->NewGLState |= new_state;

   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}
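
/* glFlush() hook: submit the current batchbuffer to the kernel if it contains
 * any commands, without waiting for them to complete.
 */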
void intelFlush( GLcontext *ctx )
{
   struct intel_context *intel = intel_context( ctx );

   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);
}
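
/* glFinish() hook: flush the batchbuffer and then block on its last fence, so
 * all previously issued rendering has completed before returning.
 */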
void intelFinish( GLcontext *ctx )
{
   struct intel_context *intel = intel_context( ctx );

   intelFlush(ctx);
   if (intel->batch->last_fence) {
      dri_fence_wait(intel->batch->last_fence);
      dri_fence_unreference(intel->batch->last_fence);
      intel->batch->last_fence = NULL;
   }
}
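
/* Occlusion query support: the pipeline's PS_DEPTH_COUNT register is sampled
 * through the DRM_I830_MMIO ioctl at BeginQuery and again at EndQuery, after
 * forcing completion with intelFinish(); the query result is the difference
 * between the two samples.
 */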
static void
intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   drmI830MMIO io = {
      .read_write = MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &q->Result
   };

   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I830_MMIO, &io, sizeof(io));
}
static void
intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   GLuint64EXT tmp = 0;
   drmI830MMIO io = {
      .read_write = MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &tmp
   };

   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I830_MMIO, &io, sizeof(io));

   q->Result = tmp - q->Result;
   q->Ready = GL_TRUE;
}
/** Driver-specific fence emit implementation for the fake memory manager. */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *)private;
   unsigned int fence;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */

   fence = intelEmitIrqLocked(intel);

   return fence;
}

/** Driver-specific fence wait implementation for the fake memory manager. */
static void
intel_fence_wait(void *private, unsigned int cookie)
{
   struct intel_context *intel = (struct intel_context *)private;

   intelWaitIrq(intel, cookie);
}
static GLboolean
intel_init_bufmgr(struct intel_context *intel)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;
   GLboolean ttm_disable = getenv("INTEL_NO_TTM") != NULL;

   /* If we've got a new enough DDX that's initializing TTM and giving us
    * object handles for the shared buffers, use that.
    */
   intel->ttm = GL_FALSE;
   if (!ttm_disable &&
       intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
       intel->intelScreen->drmMinor >= 11 &&
       intel->intelScreen->front.bo_handle != -1) {
      intel->bufmgr = intel_bufmgr_ttm_init(intel->driFd,
                                            DRM_FENCE_TYPE_EXE,
                                            DRM_I915_FENCE_TYPE_RW,
                                            BATCH_SZ);
      if (intel->bufmgr != NULL)
         intel->ttm = GL_TRUE;
   }

   /* Otherwise, use the classic buffer manager. */
   if (intel->bufmgr == NULL) {
      if (ttm_disable)
         fprintf(stderr, "TTM buffer manager disabled.  Using classic.\n");
      else
         fprintf(stderr, "Failed to initialize TTM buffer manager.  "
                 "Falling back to classic.\n");

      if (intelScreen->tex.size == 0) {
         fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
                 __FUNCTION__, __LINE__);
         return GL_FALSE;
      }

      intel->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
                                           intelScreen->tex.map,
                                           intelScreen->tex.size,
                                           intel_fence_emit,
                                           intel_fence_wait,
                                           intel);
   }

   return GL_TRUE;
}
void intelInitDriverFunctions( struct dd_function_table *functions )
{
   _mesa_init_driver_functions( functions );

   functions->Flush = intelFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;
   functions->BeginQuery = intelBeginQuery;
   functions->EndQuery = intelEndQuery;

   /* CopyPixels can be accelerated even with the current memory
    * manager design.
    */
   if (!getenv("INTEL_NO_BLIT")) {
      functions->CopyPixels = intelCopyPixels;
      functions->Bitmap = intelBitmap;
   }

   intelInitTextureFuncs( functions );
   intelInitStateFuncs( functions );
   intelInitBufferFuncs( functions );
}
GLboolean intelInitContext( struct intel_context *intel,
                            const __GLcontextModes *mesaVis,
                            __DRIcontextPrivate *driContextPriv,
                            void *sharedContextPrivate,
                            struct dd_function_table *functions )
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *)sPriv->private;
   volatile drmI830Sarea *saPriv = (drmI830Sarea *)
      (((GLubyte *)sPriv->pSAREA) + intelScreen->sarea_priv_offset);

   if (!_mesa_initialize_context(&intel->ctx, mesaVis, shareCtx,
                                 functions, (void *) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }
   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;

   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = (drmLock *) &sPriv->pSAREA->lock;

   intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))
      return GL_FALSE;
   driParseConfigFiles (&intel->optionCache, &intelScreen->optionCache,
                        intel->driScreen->myNum, "i965");

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;
   }

   if (intel->strict_conformance) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depends on constants in __GLcontextRec::Const
    */
   _mesa_init_point(ctx);
   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );

   TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   switch (mesaVis->depthBits) {
   case 0:                      /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0/0xffff;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0/0xffffff;  /* req'd to pass glean */
      break;
   }
   /* Initialize swrast, tnl driver tables: */
   intelInitSpanFuncs( ctx );

   if (!intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active.  Exiting\n");
      exit(1);
   }

   intelInitExtensions(ctx, GL_TRUE);

   INTEL_DEBUG = driParseDebugString( getenv( "INTEL_DEBUG" ), debug_control );
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   intel_recreate_static_regions(intel);

   intel_bufferobj_init( intel );
   intel_fbo_init( intel );

   intel->batch = intel_batchbuffer_alloc( intel );
   intel->last_swap_fence = NULL;
   intel->first_swap_fence = NULL;

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
      _mesa_enable_extension( ctx, "GL_S3_s3tc" );
   }
   else if (driQueryOptionb (&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
   }

   /* driInitTextureObjects( ctx, & intel->swapped, */
   /*                        DRI_TEXMGR_DO_TEXTURE_1D | */
   /*                        DRI_TEXMGR_DO_TEXTURE_2D | */
   /*                        DRI_TEXMGR_DO_TEXTURE_RECT ); */

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel).
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}
void intelDestroyContext(__DRIcontextPrivate *driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      intel->vtbl.destroy( intel );

      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext (&intel->ctx);
      _tnl_DestroyContext (&intel->ctx);
      _vbo_DestroyContext (&intel->ctx);

      _swrast_DestroyContext (&intel->ctx);
      intel->Fallback = 0;      /* don't call _swrast_Flush later */
      intel_batchbuffer_free(intel->batch);

      if (intel->last_swap_fence) {
         dri_fence_wait(intel->last_swap_fence);
         dri_fence_unreference(intel->last_swap_fence);
         intel->last_swap_fence = NULL;
      }
      if (intel->first_swap_fence) {
         dri_fence_wait(intel->first_swap_fence);
         dri_fence_unreference(intel->first_swap_fence);
         intel->first_swap_fence = NULL;
      }

      if ( release_texture_heaps ) {
         /* This share group is about to go away, free our private
          * texture object data.
          */
      }

      /* XXX: destroy the shared bufmgr struct here?
       */

      /* free the Mesa context */
      intel->ctx.VertexProgram.Current = NULL;
      intel->ctx.FragmentProgram.Current = NULL;
      _mesa_destroy_context(&intel->ctx);
   }

   driContextPriv->driverPrivate = NULL;
}
GLboolean intelUnbindContext(__DRIcontextPrivate *driContextPriv)
{
   return GL_TRUE;
}
GLboolean intelMakeCurrent(__DRIcontextPrivate *driContextPriv,
                           __DRIdrawablePrivate *driDrawPriv,
                           __DRIdrawablePrivate *driReadPriv)
{
   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
         (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;

      /* XXX FBO temporary fix-ups! */
      /* if the renderbuffers don't have regions, init them from the context.
       * They will be unreferenced when the renderbuffer is destroyed.
       */
      {
         struct intel_renderbuffer *irbDepth
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         struct intel_renderbuffer *irbStencil
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

         if (intel_fb->color_rb[0]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[0],
                                          intel->front_region);
         }
         if (intel_fb->color_rb[1]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[1],
                                          intel->back_region);
         }
         if (intel_fb->color_rb[2]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[2],
                                          intel->third_region);
         }
         if (irbDepth) {
            intel_renderbuffer_set_region(irbDepth, intel->depth_region);
         }
         if (irbStencil) {
            intel_renderbuffer_set_region(irbStencil, intel->depth_region);
         }
      }
      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (intel->driReadDrawable != driReadPriv) {
         intel->driReadDrawable = driReadPriv;
      }

      if ( intel->driDrawable != driDrawPriv ) {
         if (driDrawPriv->swap_interval == (unsigned)-1) {
            driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
               ? driGetDefaultVBlankFlags(&intel->optionCache)
               : VBLANK_FLAG_NO_IRQ;
            driDrawableInitVBlank( driDrawPriv );
         }

         intel->driDrawable = driDrawPriv;
         intelWindowMoved( intel );
         /* Shouldn't the readbuffer be stored also? */
      }

      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);

      intel_draw_buffer(&intel->ctx, &intel_fb->Base);
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}
static void intelContendedLock( struct intel_context *intel, GLuint flags )
{
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   volatile drmI830Sarea *sarea = intel->sarea;
   int me = intel->hHWContext;

   drmGetLock(intel->driFd, intel->hHWContext, flags);

   /* If the window moved, may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call:
    */
   if (dPriv)
      DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);

   if (sarea->ctxOwner != me) {
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
         fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
                 sarea->ctxOwner, me);
      }
      sarea->ctxOwner = me;
   }

   /* If the last consumer of the texture memory wasn't us, notify the fake
    * bufmgr and record the new owner.  We should have the memory shared
    * between contexts of a single fake bufmgr, but this will at least make
    * things correct for now.
    */
   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
      sarea->texAge = intel->hHWContext;
      dri_bufmgr_fake_contended_lock_take(intel->bufmgr);
      if (INTEL_DEBUG & DEBUG_BATCH)
         intel_decode_context_reset();
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
         fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
                 sarea->ctxOwner, intel->hHWContext);
      }
   }

   if (dPriv && intel->lastStamp != dPriv->lastStamp) {
      intelWindowMoved( intel );
      intel->lastStamp = dPriv->lastStamp;
   }
}
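
/* lockMutex serializes LOCK_HARDWARE()/UNLOCK_HARDWARE() between the threads
 * of this process; the DRM lock taken in LOCK_HARDWARE() serializes against
 * other processes.  Batchbuffer contents that reference cliprects must be
 * flushed before UNLOCK_HARDWARE() (see the assert there).
 */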
_glthread_DECLARE_STATIC_MUTEX(lockMutex);

/* Lock the hardware and validate our state.
 */
void LOCK_HARDWARE( struct intel_context *intel )
{
   char __ret = 0;

   _glthread_LOCK_MUTEX(lockMutex);
   assert(!intel->locked);

   DRM_CAS(intel->driHwLock, intel->hHWContext,
           (DRM_LOCK_HELD|intel->hHWContext), __ret);
   if (__ret)
      intelContendedLock( intel, 0 );

   intel->locked = 1;
}
/* Unlock the hardware using the global current context
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   intel->vtbl.note_unlock( intel );
   intel->locked = 0;

   DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);
   _glthread_UNLOCK_MUTEX(lockMutex);

   /**
    * Nothing should be left in batch outside of LOCK/UNLOCK which references
    * cliprects.
    */
   assert(intel->batch->cliprect_mode != REFERENCES_CLIPRECTS);
}