1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
42 #include "tnl/t_pipeline.h"
43 #include "tnl/t_vertex.h"
45 #include "drivers/common/driverfuncs.h"
47 #include "intel_screen.h"
51 #include "intel_buffers.h"
52 #include "intel_tex.h"
53 #include "intel_span.h"
54 #include "intel_tris.h"
55 #include "intel_ioctl.h"
56 #include "intel_batchbuffer.h"
57 #include "intel_blit.h"
58 #include "intel_pixel.h"
59 #include "intel_regions.h"
60 #include "intel_buffer_objects.h"
61 #include "intel_fbo.h"
62 #include "intel_decode.h"
64 #include "drirenderbuffer.h"
67 #include "xmlpool.h" /* for symbolic values of enum-type options */
/* Bitmask of DEBUG_* flags controlling driver debug output.  Starts
 * cleared; populated at context creation from the INTEL_DEBUG
 * environment variable. */
int INTEL_DEBUG = 0;
72 #define need_GL_ARB_multisample
73 #define need_GL_ARB_point_parameters
74 #define need_GL_ARB_texture_compression
75 #define need_GL_ARB_vertex_buffer_object
76 #define need_GL_ARB_vertex_program
77 #define need_GL_ARB_window_pos
78 #define need_GL_EXT_blend_color
79 #define need_GL_EXT_blend_equation_separate
80 #define need_GL_EXT_blend_func_separate
81 #define need_GL_EXT_blend_minmax
82 #define need_GL_EXT_cull_vertex
83 #define need_GL_EXT_fog_coord
84 #define need_GL_EXT_framebuffer_object
85 #define need_GL_EXT_multi_draw_arrays
86 #define need_GL_EXT_secondary_color
87 #define need_GL_NV_vertex_program
88 #include "extension_helper.h"
91 #define DRIVER_DATE "20061102"
/* Process-wide mutex serializing hardware lock acquisition across all
 * GL contexts (taken in LOCK_HARDWARE, released in UNLOCK_HARDWARE). */
93 _glthread_Mutex lockMutex
;
/* One-shot guard so the first context created initializes lockMutex
 * exactly once (checked/set in intelInitContext). */
94 static GLboolean lockMutexInit
= GL_FALSE
;
/**
 * GetString driver hook: supplies the GL_VENDOR string, or a
 * GL_RENDERER string derived from the screen's PCI device ID plus
 * DRIVER_DATE.
 * NOTE(review): this extraction is missing lines (case labels, breaks,
 * the chipset declaration and default case) — do not edit structurally
 * without the full original.
 */
97 static const GLubyte
*
98 intelGetString(GLcontext
* ctx
, GLenum name
)
/* Static buffer: the returned renderer string points at function-local
 * static storage, so it is not safe across concurrent callers. */
101 static char buffer
[128];
105 return (GLubyte
*) "Tungsten Graphics, Inc";
/* Map the PCI device ID of the current screen to a marketing name. */
109 switch (intel_context(ctx
)->intelScreen
->deviceID
) {
111 chipset
= "Intel(R) 845G";
113 case PCI_CHIP_I830_M
:
114 chipset
= "Intel(R) 830M";
116 case PCI_CHIP_I855_GM
:
117 chipset
= "Intel(R) 852GM/855GM";
119 case PCI_CHIP_I865_G
:
120 chipset
= "Intel(R) 865G";
122 case PCI_CHIP_I915_G
:
123 chipset
= "Intel(R) 915G";
125 case PCI_CHIP_I915_GM
:
126 chipset
= "Intel(R) 915GM";
128 case PCI_CHIP_I945_G
:
129 chipset
= "Intel(R) 945G";
131 case PCI_CHIP_I945_GM
:
132 chipset
= "Intel(R) 945GM";
134 case PCI_CHIP_I945_GME
:
135 chipset
= "Intel(R) 945GME";
138 chipset
= "Intel(R) G33";
141 chipset
= "Intel(R) Q35";
144 chipset
= "Intel(R) Q33";
147 chipset
= "Unknown Intel Chipset";
/* Compose the final renderer string into the static buffer. */
151 (void) driGetRendererString(buffer
, chipset
, DRIVER_DATE
, 0);
152 return (GLubyte
*) buffer
;
161 * Extension strings exported by the intel driver.
/**
 * Extensions enabled unconditionally for all supported chipsets.
 * Each entry pairs an extension name with the function table used to
 * register its entrypoints (NULL when no new entrypoints are needed).
 * NOTE(review): the NULL terminator / closing brace of this array is
 * not visible in this extraction.
 */
163 const struct dri_extension card_extensions
[] = {
164 {"GL_ARB_multisample", GL_ARB_multisample_functions
},
165 {"GL_ARB_multitexture", NULL
},
166 {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions
},
167 {"GL_ARB_texture_border_clamp", NULL
},
168 {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions
},
169 {"GL_ARB_texture_cube_map", NULL
},
170 {"GL_ARB_texture_env_add", NULL
},
171 {"GL_ARB_texture_env_combine", NULL
},
172 {"GL_ARB_texture_env_dot3", NULL
},
173 {"GL_ARB_texture_mirrored_repeat", NULL
},
174 {"GL_ARB_texture_rectangle", NULL
},
175 {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions
},
176 {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions
},
177 {"GL_ARB_window_pos", GL_ARB_window_pos_functions
},
178 {"GL_EXT_blend_color", GL_EXT_blend_color_functions
},
179 {"GL_EXT_blend_equation_separate",
180 GL_EXT_blend_equation_separate_functions
},
181 {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions
},
182 {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions
},
183 {"GL_EXT_blend_subtract", NULL
},
184 {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions
},
185 {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions
},
186 {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions
},
187 #if 1 /* XXX FBO temporary? */
188 {"GL_EXT_packed_depth_stencil", NULL
},
190 {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions
},
191 {"GL_EXT_stencil_wrap", NULL
},
192 {"GL_EXT_texture_edge_clamp", NULL
},
193 {"GL_EXT_texture_env_combine", NULL
},
194 {"GL_EXT_texture_env_dot3", NULL
},
195 {"GL_EXT_texture_filter_anisotropic", NULL
},
196 {"GL_EXT_texture_lod_bias", NULL
},
197 {"GL_3DFX_texture_compression_FXT1", NULL
},
198 {"GL_APPLE_client_storage", NULL
},
199 {"GL_MESA_pack_invert", NULL
},
200 {"GL_MESA_ycbcr_texture", NULL
},
201 {"GL_NV_blend_square", NULL
},
202 {"GL_NV_vertex_program", GL_NV_vertex_program_functions
},
203 {"GL_NV_vertex_program1_1", NULL
},
204 /* { "GL_SGIS_generate_mipmap", NULL }, */
/* Extra extensions enabled only when the TTM memory manager is active
 * (see intelInitContext: driInitExtensions(ctx, ttm_extensions, ...)).
 * NOTE(review): terminator/closing brace not visible in this extraction. */
208 const struct dri_extension ttm_extensions
[] = {
209 {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions
},
210 {"GL_ARB_pixel_buffer_object", NULL
},
/* Hardware rasterization-setup TNL stage, defined elsewhere in the
 * driver; appended to the standard pipeline below. */
214 extern const struct tnl_pipeline_stage _intel_render_stage
;
/* Customized TNL pipeline: standard Mesa transform/lighting stages
 * followed by the intel hardware render stage.
 * NOTE(review): trailing entries and the closing brace are not visible
 * in this extraction. */
216 static const struct tnl_pipeline_stage
*intel_pipeline
[] = {
217 &_tnl_vertex_transform_stage
,
218 &_tnl_vertex_cull_stage
,
219 &_tnl_normal_transform_stage
,
220 &_tnl_lighting_stage
,
221 &_tnl_fog_coordinate_stage
,
223 &_tnl_texture_transform_stage
,
224 &_tnl_point_attenuation_stage
,
225 &_tnl_vertex_program_stage
,
227 &_intel_render_stage
, /* ADD: unclipped rastersetup-to-dma */
/* Token table parsed from the INTEL_DEBUG environment variable by
 * driParseDebugString() in intelInitContext; each token sets the
 * corresponding DEBUG_* bit in the INTEL_DEBUG global.
 * NOTE(review): terminator/closing brace not visible in this extraction. */
234 static const struct dri_debug_control debug_control
[] = {
235 {"tex", DEBUG_TEXTURE
},
236 {"state", DEBUG_STATE
},
237 {"ioctl", DEBUG_IOCTL
},
238 {"blit", DEBUG_BLIT
},
239 {"mip", DEBUG_MIPTREE
},
240 {"fall", DEBUG_FALLBACKS
},
241 {"verb", DEBUG_VERBOSE
},
242 {"bat", DEBUG_BATCH
},
243 {"pix", DEBUG_PIXEL
},
244 {"buf", DEBUG_BUFMGR
},
245 {"reg", DEBUG_REGION
},
247 {"lock", DEBUG_LOCK
},
/**
 * UpdateState driver hook: forward GL state-change flags to the
 * swrast, swsetup, vbo and tnl helper modules, and accumulate them in
 * intel->NewGLState for later hardware state validation.
 */
253 intelInvalidateState(GLcontext
* ctx
, GLuint new_state
)
255 _swrast_InvalidateState(ctx
, new_state
);
256 _swsetup_InvalidateState(ctx
, new_state
);
257 _vbo_InvalidateState(ctx
, new_state
);
258 _tnl_InvalidateState(ctx
, new_state
);
259 _tnl_invalidate_vertex_state(ctx
, new_state
);
/* Record the dirty bits so the driver revalidates before drawing. */
260 intel_context(ctx
)->NewGLState
|= new_state
;
/**
 * Emit any queued vertices, then flush the batchbuffer if it contains
 * unsubmitted commands (map != ptr means data has been written).
 */
265 intelFlush(GLcontext
* ctx
)
267 struct intel_context
*intel
= intel_context(ctx
);
272 INTEL_FIREVERTICES(intel
);
/* Only flush when the batchbuffer is non-empty. */
274 if (intel
->batch
->map
!= intel
->batch
->ptr
)
275 intel_batchbuffer_flush(intel
->batch
);
277 /* XXX: Need to do an MI_FLUSH here.
283 * Check if we need to rotate/warp the front color buffer to the
284 * rotated screen. We generally need to do this when we get a glFlush
285 * or glFinish after drawing to the front color buffer.
/**
 * If the context is drawing to the front-left buffer and the screen is
 * rotated, copy/warp the drawable's contents into the rotated screen
 * via intelRotateWindow().  Typically called after glFlush/glFinish.
 */
288 intelCheckFrontRotate(GLcontext
* ctx
)
290 struct intel_context
*intel
= intel_context(ctx
);
291 if (intel
->ctx
.DrawBuffer
->_ColorDrawBufferMask
[0] ==
292 BUFFER_BIT_FRONT_LEFT
) {
293 intelScreenPrivate
*screen
= intel
->intelScreen
;
/* Nothing to do unless the screen is actually rotated. */
294 if (screen
->current_rotation
!= 0) {
295 __DRIdrawablePrivate
*dPriv
= intel
->driDrawable
;
296 intelRotateWindow(intel
, dPriv
, BUFFER_BIT_FRONT_LEFT
);
303 * Called via glFlush.
/**
 * glFlush driver hook; also checks whether the front buffer needs to
 * be copied to the rotated screen.
 * NOTE(review): additional body lines (e.g. the flush call itself) are
 * not visible in this extraction.
 */
306 intelglFlush(GLcontext
* ctx
)
309 intelCheckFrontRotate(ctx
);
/**
 * glFinish driver hook: wait on the batchbuffer's last fence so all
 * submitted rendering has completed, release the fence reference, then
 * handle any pending front-buffer rotation.
 */
313 intelFinish(GLcontext
* ctx
)
315 struct intel_context
*intel
= intel_context(ctx
);
317 if (intel
->batch
->last_fence
) {
318 dri_fence_wait(intel
->batch
->last_fence
);
/* Drop our reference and clear the pointer so it isn't reused. */
319 dri_fence_unreference(intel
->batch
->last_fence
);
320 intel
->batch
->last_fence
= NULL
;
322 intelCheckFrontRotate(ctx
);
/**
 * Populate the dd_function_table with intel driver hooks, starting
 * from Mesa's defaults.  Imaging-subset entries (color table /
 * convolution copies) fall back to the software rasterizer.
 */
327 intelInitDriverFunctions(struct dd_function_table
*functions
)
329 _mesa_init_driver_functions(functions
);
331 functions
->Flush
= intelglFlush
;
332 functions
->Finish
= intelFinish
;
333 functions
->GetString
= intelGetString
;
334 functions
->UpdateState
= intelInvalidateState
;
335 functions
->CopyColorTable
= _swrast_CopyColorTable
;
336 functions
->CopyColorSubTable
= _swrast_CopyColorSubTable
;
337 functions
->CopyConvolutionFilter1D
= _swrast_CopyConvolutionFilter1D
;
338 functions
->CopyConvolutionFilter2D
= _swrast_CopyConvolutionFilter2D
;
/* Subsystem-specific hook installation (texture, pixel, state, buffers). */
340 intelInitTextureFuncs(functions
);
341 intelInitPixelFuncs(functions
);
342 intelInitStateFuncs(functions
);
343 intelInitBufferFuncs(functions
);
/**
 * Common context-creation code shared by the chip-specific drivers:
 * initializes the Mesa context, driver constants, the software helper
 * modules (swrast/vbo/tnl/swsetup), the customized TNL pipeline, DRI
 * extensions, the batchbuffer, and buffer-object/FBO support.
 * Presumably returns GL_FALSE on failure — TODO confirm; the failure
 * path lines are not visible in this extraction.
 */
348 intelInitContext(struct intel_context
*intel
,
349 const __GLcontextModes
* mesaVis
,
350 __DRIcontextPrivate
* driContextPriv
,
351 void *sharedContextPrivate
,
352 struct dd_function_table
*functions
)
354 GLcontext
*ctx
= &intel
->ctx
;
355 GLcontext
*shareCtx
= (GLcontext
*) sharedContextPrivate
;
356 __DRIscreenPrivate
*sPriv
= driContextPriv
->driScreenPriv
;
357 intelScreenPrivate
*intelScreen
= (intelScreenPrivate
*) sPriv
->private;
/* The i830 SAREA lives at an offset inside the shared SAREA page. */
358 drmI830Sarea
*saPriv
= (drmI830Sarea
*)
359 (((GLubyte
*) sPriv
->pSAREA
) + intelScreen
->sarea_priv_offset
);
362 if (!_mesa_initialize_context(&intel
->ctx
,
364 functions
, (void *) intel
))
/* Wire up the cross-references between DRI and driver objects. */
367 driContextPriv
->driverPrivate
= intel
;
368 intel
->intelScreen
= intelScreen
;
369 intel
->driScreen
= sPriv
;
370 intel
->sarea
= saPriv
;
372 intel
->width
= intelScreen
->width
;
373 intel
->height
= intelScreen
->height
;
374 intel
->current_rotation
= intelScreen
->current_rotation
;
/* First context in the process initializes the global hardware-lock
 * mutex used by LOCK_HARDWARE/UNLOCK_HARDWARE. */
376 if (!lockMutexInit
) {
377 lockMutexInit
= GL_TRUE
;
378 _glthread_INIT_MUTEX(lockMutex
);
381 driParseConfigFiles(&intel
->optionCache
, &intelScreen
->optionCache
,
382 intel
->driScreen
->myNum
, "i915");
384 ctx
->Const
.MaxTextureMaxAnisotropy
= 2.0;
386 /* This doesn't yet catch all non-conformant rendering, but it's a
389 if (getenv("INTEL_STRICT_CONFORMANCE")) {
390 intel
->strict_conformance
= 1;
/* Hardware line/point limits advertised to applications. */
393 ctx
->Const
.MinLineWidth
= 1.0;
394 ctx
->Const
.MinLineWidthAA
= 1.0;
395 ctx
->Const
.MaxLineWidth
= 3.0;
396 ctx
->Const
.MaxLineWidthAA
= 3.0;
397 ctx
->Const
.LineWidthGranularity
= 1.0;
399 ctx
->Const
.MinPointSize
= 1.0;
400 ctx
->Const
.MinPointSizeAA
= 1.0;
401 ctx
->Const
.MaxPointSize
= 255.0;
402 ctx
->Const
.MaxPointSizeAA
= 3.0;
403 ctx
->Const
.PointSizeGranularity
= 1.0;
405 /* reinitialize the context point state.
406 * It depend on constants in __GLcontextRec::Const
408 _mesa_init_point(ctx
);
410 ctx
->Const
.MaxColorAttachments
= 4; /* XXX FBO: review this */
412 /* Initialize the software rasterizer and helper modules. */
413 _swrast_CreateContext(ctx
);
414 _vbo_CreateContext(ctx
);
415 _tnl_CreateContext(ctx
);
416 _swsetup_CreateContext(ctx
);
418 /* Install the customized pipeline: */
419 _tnl_destroy_pipeline(ctx
);
420 _tnl_install_pipeline(ctx
, intel_pipeline
);
422 /* Configure swrast to match hardware characteristics: */
423 _swrast_allow_pixel_fog(ctx
, GL_FALSE
);
424 _swrast_allow_vertex_fog(ctx
, GL_TRUE
);
427 intel
->hHWContext
= driContextPriv
->hHWContext
;
428 intel
->driFd
= sPriv
->fd
;
429 intel
->driHwLock
= (drmLock
*) & sPriv
->pSAREA
->lock
;
431 intel
->hw_stipple
= 1;
433 /* XXX FBO: this doesn't seem to be used anywhere */
434 switch (mesaVis
->depthBits
) {
435 case 0: /* what to do in this case? */
437 intel
->polygon_offset_scale
= 1.0 / 0xffff;
440 intel
->polygon_offset_scale
= 2.0 / 0xffffff; /* req'd to pass glean */
447 /* Initialize swrast, tnl driver tables: */
448 intelInitSpanFuncs(ctx
);
449 intelInitTriFuncs(ctx
);
452 intel
->RenderIndex
= ~0;
/* Frame-throttling mode comes from the driconf option cache. */
454 fthrottle_mode
= driQueryOptioni(&intel
->optionCache
, "fthrottle_mode");
455 intel
->irqsEmitted
= 0;
457 intel
->do_irqs
= (intel
->intelScreen
->irq_active
&&
458 fthrottle_mode
== DRI_CONF_FTHROTTLE_IRQS
);
460 intel
->do_usleeps
= (fthrottle_mode
== DRI_CONF_FTHROTTLE_USLEEPS
);
462 _math_matrix_ctr(&intel
->ViewportMatrix
);
464 /* Disable imaging extension until convolution is working in
467 driInitExtensions(ctx
, card_extensions
,
/* TTM-only extensions (FBO, PBO) are layered on top when available. */
471 if (intelScreen
->ttm
)
472 driInitExtensions(ctx
, ttm_extensions
, GL_FALSE
);
475 intel
->batch
= intel_batchbuffer_alloc(intel
);
476 intel
->last_swap_fence
= NULL
;
477 intel
->first_swap_fence
= NULL
;
479 intel_bufferobj_init(intel
);
480 intel_fbo_init(intel
);
/* S3TC decompression: enabled when libtxc_dxtn is present, or forced
 * via the driconf force_s3tc_enable option. */
482 if (intel
->ctx
.Mesa_DXTn
) {
483 _mesa_enable_extension(ctx
, "GL_EXT_texture_compression_s3tc");
484 _mesa_enable_extension(ctx
, "GL_S3_s3tc");
486 else if (driQueryOptionb(&intel
->optionCache
, "force_s3tc_enable")) {
487 _mesa_enable_extension(ctx
, "GL_EXT_texture_compression_s3tc");
490 intel
->prim
.primitive
= ~0;
/* Populate the INTEL_DEBUG bitmask from the environment. */
494 INTEL_DEBUG
= driParseDebugString(getenv("INTEL_DEBUG"), debug_control
);
497 if (getenv("INTEL_NO_RAST")) {
498 fprintf(stderr
, "disabling 3D rasterization\n");
499 FALLBACK(intel
, INTEL_FALLBACK_USER
, 1);
/**
 * Context destruction: flush pending vertices, run the chip-specific
 * destroy hook, tear down helper modules, free the batchbuffer, wait
 * on and release outstanding swap fences, then free Mesa state.
 */
506 intelDestroyContext(__DRIcontextPrivate
* driContextPriv
)
508 struct intel_context
*intel
=
509 (struct intel_context
*) driContextPriv
->driverPrivate
;
511 assert(intel
); /* should never be null */
513 GLboolean release_texture_heaps
;
515 INTEL_FIREVERTICES(intel
);
/* Chip-specific teardown (i830/i915 vtable). */
517 intel
->vtbl
.destroy(intel
);
/* If we are the last context in the share group, our private texture
 * data goes away with us. */
519 release_texture_heaps
= (intel
->ctx
.Shared
->RefCount
== 1);
520 _swsetup_DestroyContext(&intel
->ctx
);
521 _tnl_DestroyContext(&intel
->ctx
);
522 _vbo_DestroyContext(&intel
->ctx
);
524 _swrast_DestroyContext(&intel
->ctx
);
525 intel
->Fallback
= 0; /* don't call _swrast_Flush later */
527 intel_batchbuffer_free(intel
->batch
);
/* Wait for and release any fences still held for page flips/swaps. */
529 if (intel
->last_swap_fence
) {
530 dri_fence_wait(intel
->last_swap_fence
);
531 dri_fence_unreference(intel
->last_swap_fence
);
532 intel
->last_swap_fence
= NULL
;
534 if (intel
->first_swap_fence
) {
535 dri_fence_wait(intel
->first_swap_fence
);
536 dri_fence_unreference(intel
->first_swap_fence
);
537 intel
->first_swap_fence
= NULL
;
541 if (release_texture_heaps
) {
542 /* This share group is about to go away, free our private
543 * texture object data.
545 if (INTEL_DEBUG
& DEBUG_TEXTURE
)
546 fprintf(stderr
, "do something to free texture heaps\n");
549 /* free the Mesa context */
550 _mesa_free_context_data(&intel
->ctx
);
/* DRI unbind hook (context released from the current thread).
 * NOTE(review): the function body is not visible in this extraction. */
555 intelUnbindContext(__DRIcontextPrivate
* driContextPriv
)
/**
 * MakeCurrent driver hook: bind the context to draw/read drawables.
 * Lazily attaches screen memory regions to window renderbuffers that
 * lack one, resizes the GL framebuffers to the window, sets up vblank
 * synchronization state when the drawable changes, and updates the
 * draw buffer.  A NULL driContextPriv unbinds the current context.
 */
561 intelMakeCurrent(__DRIcontextPrivate
* driContextPriv
,
562 __DRIdrawablePrivate
* driDrawPriv
,
563 __DRIdrawablePrivate
* driReadPriv
)
566 if (driContextPriv
) {
567 struct intel_context
*intel
=
568 (struct intel_context
*) driContextPriv
->driverPrivate
;
569 struct intel_framebuffer
*intel_fb
=
570 (struct intel_framebuffer
*) driDrawPriv
->driverPrivate
;
571 GLframebuffer
*readFb
= (GLframebuffer
*) driReadPriv
->driverPrivate
;
574 /* XXX FBO temporary fix-ups! */
575 /* if the renderbuffers don't have regions, init them from the context */
577 struct intel_renderbuffer
*irbDepth
578 = intel_get_renderbuffer(&intel_fb
->Base
, BUFFER_DEPTH
);
579 struct intel_renderbuffer
*irbStencil
580 = intel_get_renderbuffer(&intel_fb
->Base
, BUFFER_STENCIL
);
/* Point the window's color renderbuffers at the screen's front/back/
 * third regions if they have none yet. */
582 if (intel_fb
->color_rb
[0] && !intel_fb
->color_rb
[0]->region
) {
583 intel_region_reference(&intel_fb
->color_rb
[0]->region
,
584 intel
->intelScreen
->front_region
);
586 if (intel_fb
->color_rb
[1] && !intel_fb
->color_rb
[1]->region
) {
587 intel_region_reference(&intel_fb
->color_rb
[1]->region
,
588 intel
->intelScreen
->back_region
);
590 if (intel_fb
->color_rb
[2] && !intel_fb
->color_rb
[2]->region
) {
591 intel_region_reference(&intel_fb
->color_rb
[2]->region
,
592 intel
->intelScreen
->third_region
);
/* Depth and stencil share the screen's combined depth region. */
594 if (irbDepth
&& !irbDepth
->region
) {
595 intel_region_reference(&irbDepth
->region
, intel
->intelScreen
->depth_region
);
597 if (irbStencil
&& !irbStencil
->region
) {
598 intel_region_reference(&irbStencil
->region
, intel
->intelScreen
->depth_region
);
602 /* set GLframebuffer size to match window, if needed */
603 driUpdateFramebufferSize(&intel
->ctx
, driDrawPriv
);
605 if (driReadPriv
!= driDrawPriv
) {
606 driUpdateFramebufferSize(&intel
->ctx
, driReadPriv
);
609 _mesa_make_current(&intel
->ctx
, &intel_fb
->Base
, readFb
);
611 /* The drawbuffer won't always be updated by _mesa_make_current:
613 if (intel
->ctx
.DrawBuffer
== &intel_fb
->Base
) {
/* Drawable changed: (re)initialize vblank bookkeeping for it. */
615 if (intel
->driDrawable
!= driDrawPriv
) {
616 if (driDrawPriv
->swap_interval
== (unsigned)-1) {
/* Use IRQ-driven vblank waits only when the screen has IRQs. */
619 intel_fb
->vblank_flags
= (intel
->intelScreen
->irq_active
!= 0)
620 ? driGetDefaultVBlankFlags(&intel
->optionCache
)
621 : VBLANK_FLAG_NO_IRQ
;
623 (*dri_interface
->getUST
) (&intel_fb
->swap_ust
);
624 driDrawableInitVBlank(driDrawPriv
, intel_fb
->vblank_flags
,
626 intel_fb
->vbl_waited
= intel_fb
->vbl_seq
;
/* Triple-buffered screens track a third color buffer. */
628 for (i
= 0; i
< (intel
->intelScreen
->third
.handle
? 3 : 2); i
++) {
629 if (intel_fb
->color_rb
[i
])
630 intel_fb
->color_rb
[i
]->vbl_pending
= intel_fb
->vbl_seq
;
633 intel
->driDrawable
= driDrawPriv
;
634 intelWindowMoved(intel
);
637 intel_draw_buffer(&intel
->ctx
, &intel_fb
->Base
);
/* NULL context: release the current binding entirely. */
641 _mesa_make_current(NULL
, NULL
, NULL
);
/**
 * Called when the hardware lock was contended (someone else held it
 * since we last did).  Re-acquires the DRM lock, revalidates drawable
 * info, hands texture memory ownership back to the fake buffer manager
 * if another entity touched it, and reacts to screen resize/rotation
 * by flushing outstanding rendering and forcing a window update.
 */
648 intelContendedLock(struct intel_context
*intel
, GLuint flags
)
650 __DRIdrawablePrivate
*dPriv
= intel
->driDrawable
;
651 __DRIscreenPrivate
*sPriv
= intel
->driScreen
;
652 intelScreenPrivate
*intelScreen
= (intelScreenPrivate
*) sPriv
->private;
653 drmI830Sarea
*sarea
= intel
->sarea
;
655 drmGetLock(intel
->driFd
, intel
->hHWContext
, flags
);
657 if (INTEL_DEBUG
& DEBUG_LOCK
)
658 _mesa_printf("%s - got contended lock\n", __progname
);
660 /* If the window moved, may need to set a new cliprect now.
662 * NOTE: This releases and regains the hw lock, so all state
663 * checking must be done *after* this call:
666 DRI_VALIDATE_DRAWABLE_INFO(sPriv
, dPriv
);
668 /* If the last consumer of the texture memory wasn't us, notify the fake
669 * bufmgr and record the new owner. We should have the memory shared
670 * between contexts of a single fake bufmgr, but this will at least make
671 * things correct for now.
673 if (!intel
->intelScreen
->ttm
&& sarea
->texAge
!= intel
->hHWContext
) {
674 sarea
->texAge
= intel
->hHWContext
;
675 dri_bufmgr_fake_contended_lock_take(intel
->intelScreen
->bufmgr
);
676 if (INTEL_DEBUG
& DEBUG_BATCH
)
677 intel_decode_context_reset();
/* Screen geometry changed behind our back: update screen-level state. */
680 if (sarea
->width
!= intelScreen
->width
||
681 sarea
->height
!= intelScreen
->height
||
682 sarea
->rotation
!= intelScreen
->current_rotation
) {
684 intelUpdateScreenRotation(sPriv
, sarea
);
/* Context-level geometry stale: flush with no cliprects so pending
 * rendering is dropped rather than drawn to the wrong place. */
687 if (sarea
->width
!= intel
->width
||
688 sarea
->height
!= intel
->height
||
689 sarea
->rotation
!= intel
->current_rotation
) {
690 int numClipRects
= intel
->numClipRects
;
693 * FIXME: Really only need to do this when drawing to a
694 * common back- or front buffer.
698 * This will essentially drop the outstanding batchbuffer on the floor.
700 intel
->numClipRects
= 0;
703 _swrast_flush(&intel
->ctx
);
705 INTEL_FIREVERTICES(intel
);
707 if (intel
->batch
->map
!= intel
->batch
->ptr
)
708 intel_batchbuffer_flush(intel
->batch
);
710 intel
->numClipRects
= numClipRects
;
712 /* force window update */
713 intel
->lastStamp
= 0;
715 intel
->width
= sarea
->width
;
716 intel
->height
= sarea
->height
;
717 intel
->current_rotation
= sarea
->rotation
;
/* Drawable stamp changed while unlocked: window moved/resized. */
722 if (dPriv
&& intel
->lastStamp
!= dPriv
->lastStamp
) {
723 intelWindowMoved(intel
);
724 intel
->lastStamp
= dPriv
->lastStamp
;
730 /* Lock the hardware and validate our state.
/**
 * Acquire the hardware lock: take the process-wide mutex, optionally
 * wait for a scheduled vblank on the renderbuffer being displayed,
 * then attempt the lightweight DRM_CAS lock, falling back to
 * intelContendedLock() on contention.
 */
732 void LOCK_HARDWARE( struct intel_context
*intel
)
735 struct intel_framebuffer
*intel_fb
= NULL
;
736 struct intel_renderbuffer
*intel_rb
= NULL
;
737 _glthread_LOCK_MUTEX(lockMutex
);
738 assert(!intel
->locked
);
740 if (intel
->driDrawable
) {
741 intel_fb
= intel
->driDrawable
->driverPrivate
;
/* Pick the renderbuffer currently being drawn (front or back). */
745 intel_get_renderbuffer(&intel_fb
->Base
,
746 intel_fb
->Base
._ColorDrawBufferMask
[0] ==
747 BUFFER_BIT_FRONT_LEFT
? BUFFER_FRONT_LEFT
:
/* Wait for a pending vblank if IRQ-driven swaps are enabled; the
 * (1<<23) window handles 32-bit sequence-counter wraparound. */
751 if (intel_rb
&& intel_fb
->vblank_flags
&&
752 !(intel_fb
->vblank_flags
& VBLANK_FLAG_NO_IRQ
) &&
753 (intel_fb
->vbl_waited
- intel_rb
->vbl_pending
) > (1<<23)) {
756 vbl
.request
.type
= DRM_VBLANK_ABSOLUTE
;
758 if ( intel_fb
->vblank_flags
& VBLANK_FLAG_SECONDARY
) {
759 vbl
.request
.type
|= DRM_VBLANK_SECONDARY
;
762 vbl
.request
.sequence
= intel_rb
->vbl_pending
;
763 drmWaitVBlank(intel
->driFd
, &vbl
);
764 intel_fb
->vbl_waited
= vbl
.reply
.sequence
;
/* Fast-path lock attempt; __ret signals contention. */
767 DRM_CAS(intel
->driHwLock
, intel
->hHWContext
,
768 (DRM_LOCK_HELD
|intel
->hHWContext
), __ret
);
771 intelContendedLock( intel
, 0 );
773 if (INTEL_DEBUG
& DEBUG_LOCK
)
774 _mesa_printf("%s - locked\n", __progname
);
780 /* Unlock the hardware using the global current context
/**
 * Release the DRM hardware lock and the process-wide mutex taken by
 * LOCK_HARDWARE.
 */
782 void UNLOCK_HARDWARE( struct intel_context
*intel
)
786 DRM_UNLOCK(intel
->driFd
, intel
->driHwLock
, intel
->hHWContext
);
788 _glthread_UNLOCK_MUTEX(lockMutex
);
790 if (INTEL_DEBUG
& DEBUG_LOCK
)
791 _mesa_printf("%s - unlocked\n", __progname
);