1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
42 #include "tnl/t_pipeline.h"
43 #include "tnl/t_vertex.h"
45 #include "drivers/common/driverfuncs.h"
47 #include "intel_screen.h"
51 #include "intel_buffers.h"
52 #include "intel_tex.h"
53 #include "intel_span.h"
54 #include "intel_tris.h"
55 #include "intel_ioctl.h"
56 #include "intel_batchbuffer.h"
57 #include "intel_blit.h"
58 #include "intel_pixel.h"
59 #include "intel_regions.h"
60 #include "intel_buffer_objects.h"
61 #include "intel_fbo.h"
62 #include "intel_decode.h"
63 #include "intel_bufmgr_ttm.h"
65 #include "drirenderbuffer.h"
68 #include "xmlpool.h" /* for symbolic values of enum-type options */
/* Global debug-flag bitmask; populated from the INTEL_DEBUG environment
 * variable at context creation (see debug_control[] below). */
int INTEL_DEBUG = 0;
/* Select which per-extension entry-point tables extension_helper.h should
 * emit.  Each need_GL_* define enables the matching GL_*_functions array
 * referenced from card_extensions[] / ttm_extensions[] below. */
#define need_GL_ARB_multisample
#define need_GL_ARB_point_parameters
#define need_GL_ARB_texture_compression
#define need_GL_ARB_vertex_buffer_object
#define need_GL_ARB_vertex_program
#define need_GL_ARB_window_pos
#define need_GL_EXT_blend_color
#define need_GL_EXT_blend_equation_separate
#define need_GL_EXT_blend_func_separate
#define need_GL_EXT_blend_minmax
#define need_GL_EXT_cull_vertex
#define need_GL_EXT_fog_coord
#define need_GL_EXT_framebuffer_object
#define need_GL_EXT_multi_draw_arrays
#define need_GL_EXT_secondary_color
#define need_GL_NV_vertex_program
#include "extension_helper.h"

/* Driver build date reported in the renderer string (intelGetString). */
#define DRIVER_DATE "20061102"
/* Process-wide mutex serializing LOCK_HARDWARE/UNLOCK_HARDWARE across all
 * contexts; lazily initialized once in intelInitContext (guarded by
 * lockMutexInit). */
_glthread_Mutex lockMutex;
static GLboolean lockMutexInit = GL_FALSE;
/**
 * Backend for glGetString(): returns the vendor string or composes a
 * renderer string from the PCI device ID and DRIVER_DATE.
 *
 * NOTE(review): this extraction is missing lines — the opening brace, the
 * "switch (name)" dispatch and its GL_VENDOR/GL_RENDERER case labels, the
 * declaration of "chipset", at least one PCI_CHIP_* case label, all
 * "break" statements, the default label, and the closing braces.  All
 * surviving tokens are preserved unchanged below.
 */
static const GLubyte *
intelGetString(GLcontext * ctx, GLenum name)
   /* Scratch buffer for the composed renderer string; static, so the
    * returned pointer stays valid after return (classic DRI pattern,
    * not thread-safe). */
   static char buffer[128];
      /* Vendor query result. */
      return (GLubyte *) "Tungsten Graphics, Inc";
      /* Renderer query: map the screen's PCI device ID to a name. */
      switch (intel_context(ctx)->intelScreen->deviceID) {
         /* NOTE(review): case label for this first entry is missing. */
         chipset = "Intel(R) 845G";
      case PCI_CHIP_I830_M:
         chipset = "Intel(R) 830M";
      case PCI_CHIP_I855_GM:
         chipset = "Intel(R) 852GM/855GM";
      case PCI_CHIP_I865_G:
         chipset = "Intel(R) 865G";
      case PCI_CHIP_I915_G:
         chipset = "Intel(R) 915G";
      case PCI_CHIP_I915_GM:
         chipset = "Intel(R) 915GM";
      case PCI_CHIP_I945_G:
         chipset = "Intel(R) 945G";
      case PCI_CHIP_I945_GM:
         chipset = "Intel(R) 945GM";
      case PCI_CHIP_I945_GME:
         chipset = "Intel(R) 945GME";
         /* NOTE(review): case labels missing for the next four entries. */
         chipset = "Intel(R) G33";
         chipset = "Intel(R) Q35";
         chipset = "Intel(R) Q33";
         chipset = "Unknown Intel Chipset";
      /* Compose "<chipset> <date> ..." into the static buffer. */
      (void) driGetRendererString(buffer, chipset, DRIVER_DATE, 0);
      return (GLubyte *) buffer;
/**
 * Extension strings exported by the intel driver.
 *
 * Each entry pairs an extension name with the entry-point list emitted by
 * extension_helper.h (NULL when the extension adds no new entry points).
 *
 * NOTE(review): the terminating {NULL, NULL} sentinel, the closing "};",
 * and the #endif matching the #if below are missing from this extraction.
 */
const struct dri_extension card_extensions[] = {
   {"GL_ARB_multisample", GL_ARB_multisample_functions},
   {"GL_ARB_multitexture", NULL},
   {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
   {"GL_ARB_texture_border_clamp", NULL},
   {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
   {"GL_ARB_texture_cube_map", NULL},
   {"GL_ARB_texture_env_add", NULL},
   {"GL_ARB_texture_env_combine", NULL},
   {"GL_ARB_texture_env_dot3", NULL},
   {"GL_ARB_texture_mirrored_repeat", NULL},
   {"GL_ARB_texture_rectangle", NULL},
   {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
   {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
   {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
   {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
   {"GL_EXT_blend_equation_separate",
    GL_EXT_blend_equation_separate_functions},
   {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
   {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
   {"GL_EXT_blend_subtract", NULL},
   {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
   {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
   {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
#if 1                           /* XXX FBO temporary? */
   {"GL_EXT_packed_depth_stencil", NULL},
   {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
   {"GL_EXT_stencil_wrap", NULL},
   {"GL_EXT_texture_edge_clamp", NULL},
   {"GL_EXT_texture_env_combine", NULL},
   {"GL_EXT_texture_env_dot3", NULL},
   {"GL_EXT_texture_filter_anisotropic", NULL},
   {"GL_EXT_texture_lod_bias", NULL},
   {"GL_3DFX_texture_compression_FXT1", NULL},
   {"GL_APPLE_client_storage", NULL},
   {"GL_MESA_pack_invert", NULL},
   {"GL_MESA_ycbcr_texture", NULL},
   {"GL_NV_blend_square", NULL},
   {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
   {"GL_NV_vertex_program1_1", NULL},
   { "GL_SGIS_generate_mipmap", NULL},
/* Extensions only advertised when the TTM kernel memory manager is in use
 * (enabled via driInitExtensions in intelInitContext).
 * NOTE(review): sentinel entry and closing "};" missing from extraction. */
const struct dri_extension ttm_extensions[] = {
   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
   {"GL_ARB_pixel_buffer_object", NULL},
/* Hardware rasterization stage implemented elsewhere in the driver. */
extern const struct tnl_pipeline_stage _intel_render_stage;

/* Customized TNL pipeline installed over the default one in
 * intelInitContext: the stock transform/lighting stages followed by the
 * intel render stage.
 * NOTE(review): the NULL terminator and closing "};" are missing from this
 * extraction. */
static const struct tnl_pipeline_stage *intel_pipeline[] = {
   &_tnl_vertex_transform_stage,
   &_tnl_vertex_cull_stage,
   &_tnl_normal_transform_stage,
   &_tnl_lighting_stage,
   &_tnl_fog_coordinate_stage,
   &_tnl_texture_transform_stage,
   &_tnl_point_attenuation_stage,
   &_tnl_vertex_program_stage,
   &_intel_render_stage,        /* ADD: unclipped rastersetup-to-dma */
/* Mapping from INTEL_DEBUG environment-variable keywords to DEBUG_* bits,
 * consumed by driParseDebugString() in intelInitContext.
 * NOTE(review): the sentinel entry and closing "};" are missing from this
 * extraction. */
static const struct dri_debug_control debug_control[] = {
   {"tex", DEBUG_TEXTURE},
   {"state", DEBUG_STATE},
   {"ioctl", DEBUG_IOCTL},
   {"blit", DEBUG_BLIT},
   {"mip", DEBUG_MIPTREE},
   {"fall", DEBUG_FALLBACKS},
   {"verb", DEBUG_VERBOSE},
   {"bat", DEBUG_BATCH},
   {"pix", DEBUG_PIXEL},
   {"buf", DEBUG_BUFMGR},
   {"reg", DEBUG_REGION},
   {"lock", DEBUG_LOCK},
   {"sync", DEBUG_SYNC},
255 intelInvalidateState(GLcontext
* ctx
, GLuint new_state
)
257 _swrast_InvalidateState(ctx
, new_state
);
258 _swsetup_InvalidateState(ctx
, new_state
);
259 _vbo_InvalidateState(ctx
, new_state
);
260 _tnl_InvalidateState(ctx
, new_state
);
261 _tnl_invalidate_vertex_state(ctx
, new_state
);
262 intel_context(ctx
)->NewGLState
|= new_state
;
267 intelFlush(GLcontext
* ctx
)
269 struct intel_context
*intel
= intel_context(ctx
);
274 INTEL_FIREVERTICES(intel
);
276 if (intel
->batch
->map
!= intel
->batch
->ptr
)
277 intel_batchbuffer_flush(intel
->batch
);
279 /* XXX: Need to do an MI_FLUSH here.
284 intelFinish(GLcontext
* ctx
)
286 struct intel_context
*intel
= intel_context(ctx
);
288 if (intel
->batch
->last_fence
) {
289 dri_fence_wait(intel
->batch
->last_fence
);
290 dri_fence_unreference(intel
->batch
->last_fence
);
291 intel
->batch
->last_fence
= NULL
;
/** Driver-specific fence emit implementation for the fake memory manager.
 *
 * Fix(review): the extraction dropped the return type, braces, the local
 * declaration of "fence" (used below without a declaration) and the return
 * statement; reconstructed here.
 *
 * \param private  opaque pointer, actually the struct intel_context.
 * \return the IRQ cookie identifying the emitted fence.
 */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *) private;
   unsigned int fence;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */
   fence = intelEmitIrqLocked(intel);

   return fence;
}
/** Driver-specific fence wait implementation for the fake memory manager.
 *
 * Fix(review): the extraction dropped the return type, braces and return
 * statement; reconstructed here (0 = success, matching the bufmgr
 * fence-wait callback convention).
 *
 * \param private  opaque pointer, actually the struct intel_context.
 * \param cookie   IRQ cookie previously returned by intel_fence_emit().
 */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   struct intel_context *intel = (struct intel_context *) private;

   intelWaitIrq(intel, cookie);

   return 0;
}
/**
 * Choose and initialize a buffer manager for this context: the kernel TTM
 * memory manager when available (and not disabled via the INTEL_NO_TTM
 * environment variable), otherwise the classic user-space "fake" buffer
 * manager operating on the static texture aperture.
 *
 * NOTE(review): this extraction is missing lines — the return type,
 * braces, the first half of the TTM-capability condition, the remaining
 * intel_bufmgr_ttm_init()/dri_bufmgr_fake_init() arguments, the
 * ttm_disable branch structure around the two fprintf calls, the fprintf
 * argument list and the return statements.  Surviving tokens preserved.
 */
intel_init_bufmgr(struct intel_context *intel)
   intelScreenPrivate *intelScreen = intel->intelScreen;
   /* User override: INTEL_NO_TTM forces the classic path. */
   GLboolean ttm_disable = getenv("INTEL_NO_TTM") != NULL;

   /* If we've got a new enough DDX that's initializing TTM and giving us
    * object handles for the shared buffers, use that.
    */
   intel->ttm = GL_FALSE;
       intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
       intel->intelScreen->drmMinor >= 11 &&
       intel->intelScreen->front.bo_handle != -1)
      intel->bufmgr = intel_bufmgr_ttm_init(intel->driFd,
                                            DRM_I915_FENCE_TYPE_RW,
      if (intel->bufmgr != NULL)
         intel->ttm = GL_TRUE;

   /* Otherwise, use the classic buffer manager. */
   if (intel->bufmgr == NULL) {
         fprintf(stderr, "TTM buffer manager disabled. Using classic.\n");
         fprintf(stderr, "Failed to initialize TTM buffer manager. "
                 "Falling back to classic.\n");
      /* The fake bufmgr needs a non-empty static texture range to manage. */
      if (intelScreen->tex.size == 0) {
         fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
      intel->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
                                           intelScreen->tex.map,
                                           intelScreen->tex.size,
372 intelInitDriverFunctions(struct dd_function_table
*functions
)
374 _mesa_init_driver_functions(functions
);
376 functions
->Flush
= intelFlush
;
377 functions
->Finish
= intelFinish
;
378 functions
->GetString
= intelGetString
;
379 functions
->UpdateState
= intelInvalidateState
;
380 functions
->CopyColorTable
= _swrast_CopyColorTable
;
381 functions
->CopyColorSubTable
= _swrast_CopyColorSubTable
;
382 functions
->CopyConvolutionFilter1D
= _swrast_CopyConvolutionFilter1D
;
383 functions
->CopyConvolutionFilter2D
= _swrast_CopyConvolutionFilter2D
;
385 intelInitTextureFuncs(functions
);
386 intelInitPixelFuncs(functions
);
387 intelInitStateFuncs(functions
);
388 intelInitBufferFuncs(functions
);
/**
 * Initialize a freshly-allocated intel_context: hook it into core Mesa,
 * the DRI screen/drawable machinery, the software fallback modules, the
 * customized TNL pipeline, the buffer manager and driconf options.
 *
 * NOTE(review): this extraction is missing many lines — the return type,
 * braces, the mesaVis/shareCtx arguments to _mesa_initialize_context, the
 * early-return failure paths, the "else" of the maxBatchSize choice, the
 * local declaration of fthrottle_mode, the trailing arguments of the first
 * driInitExtensions call, several closing braces and the final return.
 * Surviving tokens preserved unchanged.
 */
intelInitContext(struct intel_context *intel,
                 const __GLcontextModes * mesaVis,
                 __DRIcontextPrivate * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions)
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
   /* The i830 SAREA lives at a screen-specific offset inside the shared
    * SAREA page. */
   drmI830Sarea *saPriv = (drmI830Sarea *)
      (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);

   /* Hand off to core Mesa first; on failure the context is unusable.
    * NOTE(review): mesaVis/shareCtx arguments and the failure branch are
    * missing from this extraction. */
   if (!_mesa_initialize_context(&intel->ctx,
                                 functions, (void *) intel))

   /* Cross-link driver private data and cache screen-wide handles. */
   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;

   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = sPriv->lock;

   intel->width = intelScreen->width;
   intel->height = intelScreen->height;

   /* i865G needs small batches; everything else uses the default size.
    * NOTE(review): the "else" keyword between the two assignments is
    * missing from this extraction. */
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intel->maxBatchSize = 4096;
      intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))

   /* One-time initialization of the global hardware-lock mutex. */
   if (!lockMutexInit) {
      lockMutexInit = GL_TRUE;
      _glthread_INIT_MUTEX(lockMutex);

   /* Load driconf options for the "i915" driver section. */
   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       intel->driScreen->myNum, "i915");

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   /* This doesn't yet catch all non-conformant rendering, but it's a
    * start. */
   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;

   /* Hardware line/point limits advertised to applications. */
   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 3.0;
   ctx->Const.MaxLineWidthAA = 3.0;
   ctx->Const.LineWidthGranularity = 1.0;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depend on constants in __GLcontextRec::Const
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxColorAttachments = 4;  /* XXX FBO: review this */

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   _tnl_CreateContext(ctx);
   _swsetup_CreateContext(ctx);

   /* Install the customized pipeline: */
   _tnl_destroy_pipeline(ctx);
   _tnl_install_pipeline(ctx, intel_pipeline);

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog(ctx, GL_FALSE);
   _swrast_allow_vertex_fog(ctx, GL_TRUE);

   intel->hw_stipple = 1;

   /* XXX FBO: this doesn't seem to be used anywhere */
   /* NOTE(review): the other case labels, break statements and the closing
    * brace of this switch are missing from this extraction. */
   switch (mesaVis->depthBits) {
   case 0:                     /* what to do in this case? */
      intel->polygon_offset_scale = 1.0;
      intel->polygon_offset_scale = 2.0;       /* req'd to pass glean */

   /* Initialize swrast, tnl driver tables: */
   intelInitSpanFuncs(ctx);
   intelInitTriFuncs(ctx);

   intel->RenderIndex = ~0;

   /* Frame-throttling strategy from driconf (irq wait vs. usleep poll).
    * NOTE(review): the declaration of fthrottle_mode is missing. */
   fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
   intel->irqsEmitted = 0;

   intel->do_irqs = (intel->intelScreen->irq_active &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   _math_matrix_ctr(&intel->ViewportMatrix);

   /* Disable imaging extension until convolution is working in
    * hardware. */
   driInitExtensions(ctx, card_extensions,
   driInitExtensions(ctx, ttm_extensions, GL_FALSE);

   if (!sPriv->dri2.enabled)
      intel_recreate_static_regions(intel);

   intel->batch = intel_batchbuffer_alloc(intel);
   intel->last_swap_fence = NULL;
   intel->first_swap_fence = NULL;

   intel_bufferobj_init(intel);
   intel_fbo_init(intel);

   /* Advertise S3TC when a decoder is present, or when the user forces it. */
   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
      _mesa_enable_extension(ctx, "GL_S3_s3tc");
   else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");

   intel->prim.primitive = ~0;

   /* Parse INTEL_DEBUG into the global flag bitmask. */
   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   /* driconf "no_rast": force everything through swrast. */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      FALLBACK(intel, INTEL_FALLBACK_USER, 1);

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel). */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;
/**
 * DRI context-destruction hook: tear down the helper modules, wait out and
 * release outstanding swap fences, free the batchbuffer, Mesa context data
 * and finally the buffer manager.
 *
 * NOTE(review): this extraction is missing the return type, braces around
 * the body and the inner "if (intel)"-style guard implied by the indented
 * block after the assert, plus several closing braces and the trailing
 * free of the intel struct.  Surviving tokens preserved unchanged.
 */
intelDestroyContext(__DRIcontextPrivate * driContextPriv)
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
      GLboolean release_texture_heaps;

      /* Flush any queued primitives before tearing down state. */
      INTEL_FIREVERTICES(intel);

      /* Chip-specific (i915/i830) destruction first. */
      intel->vtbl.destroy(intel);

      /* True when this context is the last user of the share group. */
      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);

      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);

      /* Wait on and drop both page-flip/swap fences before freeing. */
      if (intel->last_swap_fence) {
         dri_fence_wait(intel->last_swap_fence);
         dri_fence_unreference(intel->last_swap_fence);
         intel->last_swap_fence = NULL;
      if (intel->first_swap_fence) {
         dri_fence_wait(intel->first_swap_fence);
         dri_fence_unreference(intel->first_swap_fence);
         intel->first_swap_fence = NULL;

      if (release_texture_heaps) {
         /* This share group is about to go away, free our private
          * texture object data.
          */
         if (INTEL_DEBUG & DEBUG_TEXTURE)
            fprintf(stderr, "do something to free texture heaps\n");

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

   dri_bufmgr_destroy(intel->bufmgr);
/**
 * DRI unbind hook, called when this context stops being current.
 *
 * NOTE(review): only the signature survives in this extraction — the
 * return type and the entire function body are missing; confirm against
 * the upstream file before relying on its behavior.
 */
intelUnbindContext(__DRIcontextPrivate * driContextPriv)
/**
 * DRI make-current hook: bind the given context to the draw/read
 * drawables (or unbind everything when driContextPriv is NULL), fixing up
 * renderbuffer regions, framebuffer sizes and vblank bookkeeping.
 *
 * NOTE(review): this extraction is missing the return type, braces, the
 * region argument of the second color_rb set_region call, the guards
 * around the depth/stencil set_region calls, the declaration of loop
 * index "i", the "else" introducing the unbind path, several closing
 * braces and the return statements.  Surviving tokens preserved.
 */
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
                 __DRIdrawablePrivate * driDrawPriv,
                 __DRIdrawablePrivate * driReadPriv)
   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
         (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;

      /* XXX FBO temporary fix-ups! */
      /* if the renderbuffers don't have regions, init them from the context */
      if (!driContextPriv->driScreenPriv->dri2.enabled) {
         struct intel_renderbuffer *irbDepth
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         struct intel_renderbuffer *irbStencil
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

         if (intel_fb->color_rb[0]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[0],
                                          intel->front_region);
         if (intel_fb->color_rb[1]) {
            /* NOTE(review): region argument missing from extraction. */
            intel_renderbuffer_set_region(intel_fb->color_rb[1],
         if (intel_fb->color_rb[2]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[2],
                                          intel->third_region);
            /* Depth and stencil share the hardware depth region. */
            intel_renderbuffer_set_region(irbDepth, intel->depth_region);
            intel_renderbuffer_set_region(irbStencil, intel->depth_region);

      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (driReadPriv != driDrawPriv) {
         driUpdateFramebufferSize(&intel->ctx, driReadPriv);

      _mesa_make_current(&intel->ctx, &intel_fb->Base, readFb);

      /* The drawbuffer won't always be updated by _mesa_make_current:
       */
      if (intel->ctx.DrawBuffer == &intel_fb->Base) {
         /* First time this drawable is bound to this context: set up
          * vblank synchronization state. */
         if (intel->driDrawable != driDrawPriv) {
            if (driDrawPriv->swap_interval == (unsigned)-1) {
               /* Pick IRQ-based vblank waits only when the screen has an
                * active interrupt handler. */
               driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
                  ? driGetDefaultVBlankFlags(&intel->optionCache)
                  : VBLANK_FLAG_NO_IRQ;
               (*dri_interface->getUST) (&intel_fb->swap_ust);
               driDrawableInitVBlank(driDrawPriv);
               intel_fb->vbl_waited = driDrawPriv->vblSeq;

               /* Seed vbl_pending for each color buffer (2 buffers, or 3
                * when a third/triple buffer region exists). */
               for (i = 0; i < (intel->intelScreen->third.handle ? 3 : 2); i++) {
                  if (intel_fb->color_rb[i])
                     intel_fb->color_rb[i]->vbl_pending = driDrawPriv->vblSeq;

            intel->driDrawable = driDrawPriv;
            intelWindowMoved(intel);
         intel_draw_buffer(&intel->ctx, &intel_fb->Base);
      /* Unbind path: clear the current context. */
      _mesa_make_current(NULL, NULL, NULL);
/**
 * Slow path taken when the hardware lock was contended: re-acquire the
 * lock, revalidate drawable information, hand texture memory back to the
 * fake bufmgr if another client touched it, and react to window
 * size/position changes.
 *
 * NOTE(review): this extraction is missing the return type, braces, the
 * "else" pairing of the dri2/DRI1 validate branch and several closing
 * braces.  Surviving tokens preserved unchanged.
 */
intelContendedLock(struct intel_context *intel, GLuint flags)
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   drmI830Sarea *sarea = intel->sarea;
   int drawable_changed = 0;

   drmGetLock(intel->driFd, intel->hHWContext, flags);

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - got contended lock\n", __progname);

   /* If the window moved, may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call:
    */
   if (sPriv->dri2.enabled)
      drawable_changed = __driParseEvents(sPriv, dPriv);
      DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);

   /* If the last consumer of the texture memory wasn't us, notify the fake
    * bufmgr and record the new owner.  We should have the memory shared
    * between contexts of a single fake bufmgr, but this will at least make
    * things correct for now.
    */
   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
      sarea->texAge = intel->hHWContext;
      dri_bufmgr_fake_contended_lock_take(intel->bufmgr);
      if (INTEL_DEBUG & DEBUG_BATCH)
         intel_decode_context_reset();

   if (!sPriv->dri2.enabled) {
      if (sarea->width != intel->width ||
          sarea->height != intel->height) {
         int numClipRects = intel->numClipRects;

         /*
          * FIXME: Really only need to do this when drawing to a
          * common back- or front buffer.
          */

         /*
          * This will essentially drop the outstanding batchbuffer on the floor.
          */
         intel->numClipRects = 0;

         _swrast_flush(&intel->ctx);

         INTEL_FIREVERTICES(intel);

         if (intel->batch->map != intel->batch->ptr)
            intel_batchbuffer_flush(intel->batch);

         intel->numClipRects = numClipRects;

         /* force window update */
         intel->lastStamp = 0;

         intel->width = sarea->width;
         intel->height = sarea->height;

      /* Window moved since we last looked: refresh cliprects. */
      if (dPriv && intel->lastStamp != dPriv->lastStamp) {
         intelWindowMoved(intel);
         intel->lastStamp = dPriv->lastStamp;
   } else if (drawable_changed) {
      intelWindowMoved(intel);
      intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
/* Lock the hardware and validate our state.
 *
 * Takes the global lockMutex, optionally throttles on a pending vblank for
 * the bound drawable, then attempts the fast-path DRM_CAS lock and falls
 * back to intelContendedLock() on contention.
 *
 * NOTE(review): this extraction is missing braces, the declarations of
 * "vbl" (drmVBlank) and "__ret", the "intel->locked = 1" bookkeeping, the
 * assignment of intel_rb from the intel_get_renderbuffer call and the
 * "if (__ret)" guard in front of intelContendedLock.  Surviving tokens
 * preserved unchanged.
 */
void LOCK_HARDWARE( struct intel_context *intel )
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   struct intel_framebuffer *intel_fb = NULL;
   struct intel_renderbuffer *intel_rb = NULL;
   _glthread_LOCK_MUTEX(lockMutex);
   assert(!intel->locked);

   if (intel->driDrawable) {
      intel_fb = intel->driDrawable->driverPrivate;
         /* Current color draw buffer of the bound framebuffer. */
         intel_get_renderbuffer(&intel_fb->Base,
                                intel_fb->Base._ColorDrawBufferIndexes[0]);

   /* Throttle on a scheduled vblank if IRQ-based waits are enabled and the
    * pending sequence is (far) ahead of what we already waited for; the
    * (1<<23) window handles 32-bit sequence wraparound. */
   if (intel_rb && dPriv->vblFlags &&
       !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) &&
       (intel_fb->vbl_waited - intel_rb->vbl_pending) > (1<<23)) {

      vbl.request.type = DRM_VBLANK_ABSOLUTE;

      if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
         vbl.request.type |= DRM_VBLANK_SECONDARY;
      vbl.request.sequence = intel_rb->vbl_pending;
      drmWaitVBlank(intel->driFd, &vbl);
      intel_fb->vbl_waited = vbl.reply.sequence;

   /* Fast-path lock attempt; on contention take the slow path. */
   DRM_CAS(intel->driHwLock, intel->hHWContext,
           (DRM_LOCK_HELD|intel->hHWContext), __ret);
      intelContendedLock( intel, 0 );

   if (INTEL_DEBUG & DEBUG_LOCK)
      _mesa_printf("%s - locked\n", __progname);
839 /* Unlock the hardware using the global current context
841 void UNLOCK_HARDWARE( struct intel_context
*intel
)
845 DRM_UNLOCK(intel
->driFd
, intel
->driHwLock
, intel
->hHWContext
);
847 _glthread_UNLOCK_MUTEX(lockMutex
);
849 if (INTEL_DEBUG
& DEBUG_LOCK
)
850 _mesa_printf("%s - unlocked\n", __progname
);
853 * Nothing should be left in batch outside of LOCK/UNLOCK which references
856 assert(intel
->batch
->cliprect_mode
!= REFERENCES_CLIPRECTS
);