i965: new integrated graphics chipset support
[mesa.git] / src / mesa / drivers / dri / i965 / intel_context.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "glheader.h"
#include "context.h"
#include "matrix.h"
#include "simple_list.h"
#include "extensions.h"
#include "framebuffer.h"
#include "imports.h"
#include "points.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "vbo/vbo.h"

#include "tnl/t_pipeline.h"
#include "tnl/t_vertex.h"

#include "drivers/common/driverfuncs.h"

#include "intel_screen.h"
#include "intel_chipset.h"

#include "i830_dri.h"
#include "i830_common.h"

#include "intel_tex.h"
#include "intel_span.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_regions.h"
#include "intel_buffers.h"
#include "intel_buffer_objects.h"
#include "intel_decode.h"
#include "intel_fbo.h"
#include "intel_bufmgr_ttm.h"

#include "drirenderbuffer.h"
#include "i915_drm.h"

#include "utils.h"
#include "vblank.h"
#ifndef INTEL_DEBUG
int INTEL_DEBUG = (0);
#endif

#define need_GL_NV_point_sprite
#define need_GL_ARB_multisample
#define need_GL_ARB_point_parameters
#define need_GL_ARB_texture_compression
#define need_GL_ARB_vertex_buffer_object
#define need_GL_ARB_vertex_program
#define need_GL_ARB_window_pos
#define need_GL_ARB_occlusion_query
#define need_GL_EXT_blend_color
#define need_GL_EXT_blend_equation_separate
#define need_GL_EXT_blend_func_separate
#define need_GL_EXT_blend_minmax
#define need_GL_EXT_cull_vertex
#define need_GL_EXT_fog_coord
#define need_GL_EXT_framebuffer_object
#define need_GL_EXT_multi_draw_arrays
#define need_GL_EXT_secondary_color
#define need_GL_ATI_separate_stencil
#define need_GL_EXT_point_parameters
#define need_GL_VERSION_2_0
#define need_GL_VERSION_2_1
#define need_GL_ARB_shader_objects
#define need_GL_ARB_vertex_shader

#include "extension_helper.h"

#ifndef VERBOSE
int VERBOSE = 0;
#endif

/***************************************
 * Mesa's Driver Functions
 ***************************************/

#define DRIVER_VERSION "4.1.3002"

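/**
 * ctx->Driver.GetString: reports the vendor string and a renderer string
 * built from the chipset name (looked up from the screen's PCI device ID)
 * together with DRIVER_VERSION.
 */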
static const GLubyte *intelGetString( GLcontext *ctx, GLenum name )
{
   const char * chipset;
   static char buffer[128];

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *)"Tungsten Graphics, Inc";
      break;

   case GL_RENDERER:
      switch (intel_context(ctx)->intelScreen->deviceID) {
      case PCI_CHIP_I965_Q:
         chipset = "Intel(R) 965Q";
         break;
      case PCI_CHIP_I965_G:
      case PCI_CHIP_I965_G_1:
         chipset = "Intel(R) 965G";
         break;
      case PCI_CHIP_I946_GZ:
         chipset = "Intel(R) 946GZ";
         break;
      case PCI_CHIP_I965_GM:
         chipset = "Intel(R) 965GM";
         break;
      case PCI_CHIP_I965_GME:
         chipset = "Intel(R) 965GME/GLE";
         break;
      case PCI_CHIP_IGD_GM:
         chipset = "Intel(R) Integrated Graphics Device";
         break;
      default:
         chipset = "Unknown Intel Chipset";
      }

      (void) driGetRendererString( buffer, chipset, DRIVER_VERSION, 0 );
      return (GLubyte *) buffer;

   default:
      return NULL;
   }
}


/**
 * Extension strings exported by the intel driver.
 *
 * \note
 * It appears that ARB_texture_env_crossbar has "disappeared" compared to the
 * old i830-specific driver.
 */
const struct dri_extension card_extensions[] =
{
   { "GL_ARB_multisample", GL_ARB_multisample_functions },
   { "GL_ARB_multitexture", NULL },
   { "GL_ARB_point_parameters", GL_ARB_point_parameters_functions },
   { "GL_NV_point_sprite", GL_NV_point_sprite_functions },
   { "GL_ARB_texture_border_clamp", NULL },
   { "GL_ARB_texture_compression", GL_ARB_texture_compression_functions },
   { "GL_ARB_texture_cube_map", NULL },
   { "GL_ARB_texture_env_add", NULL },
   { "GL_ARB_texture_env_combine", NULL },
   { "GL_ARB_texture_env_dot3", NULL },
   { "GL_ARB_texture_mirrored_repeat", NULL },
   { "GL_ARB_texture_non_power_of_two", NULL },
   { "GL_ARB_texture_rectangle", NULL },
   { "GL_NV_texture_rectangle", NULL },
   { "GL_EXT_texture_rectangle", NULL },
   { "GL_ARB_texture_rectangle", NULL },
   { "GL_ARB_point_sprite", NULL },
   { "GL_ARB_point_parameters", NULL },
   { "GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions },
   { "GL_ARB_vertex_program", GL_ARB_vertex_program_functions },
   { "GL_ARB_window_pos", GL_ARB_window_pos_functions },
   { "GL_EXT_blend_color", GL_EXT_blend_color_functions },
   { "GL_EXT_blend_equation_separate", GL_EXT_blend_equation_separate_functions },
   { "GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions },
   { "GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions },
   { "GL_EXT_blend_logic_op", NULL },
   { "GL_EXT_blend_subtract", NULL },
   { "GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions },
   { "GL_EXT_fog_coord", GL_EXT_fog_coord_functions },
   { "GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions },
   { "GL_EXT_secondary_color", GL_EXT_secondary_color_functions },
   { "GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions },
   { "GL_EXT_stencil_wrap", NULL },
   /* Do not enable this extension.  It conflicts with GL_ATI_separate_stencil
    * and 2.0's separate stencil, because mesa's computed _TestTwoSide will
    * only reflect whether it's enabled through this extension, even if the
    * application is using the other interfaces.
    */
   /*{ "GL_EXT_stencil_two_side", GL_EXT_stencil_two_side_functions },*/
   { "GL_EXT_texture_edge_clamp", NULL },
   { "GL_EXT_texture_env_combine", NULL },
   { "GL_EXT_texture_env_dot3", NULL },
   { "GL_EXT_texture_filter_anisotropic", NULL },
   { "GL_EXT_texture_lod_bias", NULL },
   { "GL_EXT_texture_sRGB", NULL },
   { "GL_3DFX_texture_compression_FXT1", NULL },
   { "GL_APPLE_client_storage", NULL },
   { "GL_MESA_pack_invert", NULL },
   { "GL_MESA_ycbcr_texture", NULL },
   { "GL_NV_blend_square", NULL },
   { "GL_SGIS_generate_mipmap", NULL },
   { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions },
   { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions },
   { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions },
   { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions },
   { "GL_ARB_fragment_shader", NULL },
   { "GL_ARB_draw_buffers", NULL },
   { NULL, NULL }
};

const struct dri_extension ttm_extensions[] = {
   { "GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions },
   { "GL_ARB_pixel_buffer_object", NULL },
   { NULL, NULL }
};

const struct dri_extension arb_oc_extension =
   { "GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions };

/**
 * If ctx is NULL, initializes the list of potentially supported extensions;
 * otherwise actually enables the extensions for the given context.
 */
void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
{
   struct intel_context *intel = ctx ? intel_context(ctx) : NULL;

   /* Disable the imaging extension until convolution is working in the
    * teximage paths.
    */
   enable_imaging = GL_FALSE;

   driInitExtensions(ctx, card_extensions, enable_imaging);

   if (intel == NULL || intel->ttm)
      driInitExtensions(ctx, ttm_extensions, GL_FALSE);

   if (intel == NULL || intel->intelScreen->drmMinor >= 8)
      driInitSingleExtension(ctx, &arb_oc_extension);
}

static const struct dri_debug_control debug_control[] =
{
   { "fall",  DEBUG_FALLBACKS },
   { "tex",   DEBUG_TEXTURE },
   { "ioctl", DEBUG_IOCTL },
   { "prim",  DEBUG_PRIMS },
   { "vert",  DEBUG_VERTS },
   { "state", DEBUG_STATE },
   { "verb",  DEBUG_VERBOSE },
   { "dri",   DEBUG_DRI },
   { "dma",   DEBUG_DMA },
   { "san",   DEBUG_SANITY },
   { "sync",  DEBUG_SYNC },
   { "sleep", DEBUG_SLEEP },
   { "pix",   DEBUG_PIXEL },
   { "buf",   DEBUG_BUFMGR },
   { "stats", DEBUG_STATS },
   { "tile",  DEBUG_TILE },
   { "sing",  DEBUG_SINGLE_THREAD },
   { "thre",  DEBUG_SINGLE_THREAD },
   { "wm",    DEBUG_WM },
   { "vs",    DEBUG_VS },
   { "bat",   DEBUG_BATCH },
   { "blit",  DEBUG_BLIT },
   { "mip",   DEBUG_MIPTREE },
   { "reg",   DEBUG_REGION },
   { "fbo",   DEBUG_FBO },
   { NULL,    0 }
};
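/* The table above maps tokens of the INTEL_DEBUG environment variable onto
 * debug flags; it is handed to driParseDebugString() from intelInitContext()
 * below.  As an illustrative (hypothetical) invocation, something like
 *
 *    INTEL_DEBUG=bat,fall glxgears
 *
 * would request batchbuffer dumps plus software-fallback reporting, assuming
 * driParseDebugString() matches tokens as substrings of the string it is
 * given.
 */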


static void intelInvalidateState( GLcontext *ctx, GLuint new_state )
{
   struct intel_context *intel = intel_context(ctx);

   _swrast_InvalidateState( ctx, new_state );
   _swsetup_InvalidateState( ctx, new_state );
   _vbo_InvalidateState( ctx, new_state );
   _tnl_InvalidateState( ctx, new_state );
   _tnl_invalidate_vertex_state( ctx, new_state );

   intel->NewGLState |= new_state;

   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}

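/**
 * ctx->Driver.Flush: submit any commands accumulated in the batchbuffer to
 * the hardware.
 */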
void intelFlush( GLcontext *ctx )
{
   struct intel_context *intel = intel_context( ctx );

   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);
}

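/**
 * ctx->Driver.Finish: flush the batchbuffer and then block until the last
 * fence emitted for it has been signalled by the hardware.
 */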
void intelFinish( GLcontext *ctx )
{
   struct intel_context *intel = intel_context( ctx );

   intelFlush(ctx);
   if (intel->batch->last_fence) {
      dri_fence_wait(intel->batch->last_fence);
      dri_fence_unreference(intel->batch->last_fence);
      intel->batch->last_fence = NULL;
   }
}

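/* GL_ARB_occlusion_query support: the query is implemented by sampling the
 * PS_DEPTH_COUNT statistics register through the DRM_I830_MMIO ioctl at
 * BeginQuery and EndQuery, after draining outstanding rendering with
 * intelFinish(); the reported result is the difference between the two
 * samples.  The stats_wm counter appears to keep statistics gathering
 * enabled in the WM state while a query is active.
 */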
static void
intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   drmI830MMIO io = {
      .read_write = MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &q->Result
   };
   intel->stats_wm++;
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I830_MMIO, &io, sizeof(io));
}

static void
intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   GLuint64EXT tmp;
   drmI830MMIO io = {
      .read_write = MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &tmp
   };
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I830_MMIO, &io, sizeof(io));
   q->Result = tmp - q->Result;
   q->Ready = GL_TRUE;
   intel->stats_wm--;
}

/** Driver-specific fence emit implementation for the fake memory manager. */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *)private;
   unsigned int fence;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */

   fence = intelEmitIrqLocked(intel);

   return fence;
}

/** Driver-specific fence wait implementation for the fake memory manager. */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   struct intel_context *intel = (struct intel_context *)private;

   intelWaitIrq(intel, cookie);

   return 0;
}

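/**
 * Picks the buffer manager for this context: the TTM-based manager when the
 * DDX/DRM are new enough to hand out buffer object handles for the shared
 * buffers (and INTEL_NO_TTM is not set), otherwise the classic "fake"
 * manager that suballocates the static texture aperture and synchronizes
 * with the IRQ-based fence callbacks above.
 */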
static GLboolean
intel_init_bufmgr(struct intel_context *intel)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;
   GLboolean ttm_disable = getenv("INTEL_NO_TTM") != NULL;

   /* If we've got a new enough DDX that's initializing TTM and giving us
    * object handles for the shared buffers, use that.
    */
   intel->ttm = GL_FALSE;
   if (!ttm_disable &&
       intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
       intel->intelScreen->drmMinor >= 11 &&
       intel->intelScreen->front.bo_handle != -1)
   {
      intel->bufmgr = intel_bufmgr_ttm_init(intel->driFd,
                                            DRM_FENCE_TYPE_EXE,
                                            DRM_FENCE_TYPE_EXE |
                                            DRM_I915_FENCE_TYPE_RW,
                                            BATCH_SZ);
      if (intel->bufmgr != NULL)
         intel->ttm = GL_TRUE;
   }
   /* Otherwise, use the classic buffer manager. */
   if (intel->bufmgr == NULL) {
      if (ttm_disable) {
         fprintf(stderr, "TTM buffer manager disabled. Using classic.\n");
      } else {
         fprintf(stderr, "Failed to initialize TTM buffer manager. "
                 "Falling back to classic.\n");
      }

      if (intelScreen->tex.size == 0) {
         fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
                 __func__, __LINE__);
         return GL_FALSE;
      }

      intel->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
                                           intelScreen->tex.map,
                                           intelScreen->tex.size,
                                           intel_fence_emit,
                                           intel_fence_wait,
                                           intel);
   }

   return GL_TRUE;
}


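/**
 * Hooks the i965 driver entry points into Mesa's device-driver function
 * table.  Everything not set here is inherited from
 * _mesa_init_driver_functions().
 */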
void intelInitDriverFunctions( struct dd_function_table *functions )
{
   _mesa_init_driver_functions( functions );

   functions->Flush = intelFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;
   functions->BeginQuery = intelBeginQuery;
   functions->EndQuery = intelEndQuery;

   /* CopyPixels can be accelerated even with the current memory
    * manager:
    */
   if (!getenv("INTEL_NO_BLIT")) {
      functions->CopyPixels = intelCopyPixels;
      functions->Bitmap = intelBitmap;
   }

   intelInitTextureFuncs( functions );
   intelInitStateFuncs( functions );
   intelInitBufferFuncs( functions );
}

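/**
 * Context-creation code for the i965 driver: builds the core Mesa context,
 * chooses a buffer manager, sets implementation limits, creates the
 * swrast/vbo/tnl/swsetup helper modules and enables the extension list.
 */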
GLboolean intelInitContext( struct intel_context *intel,
                            const __GLcontextModes *mesaVis,
                            __DRIcontextPrivate *driContextPriv,
                            void *sharedContextPrivate,
                            struct dd_function_table *functions )
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *)sPriv->private;
   volatile drmI830Sarea *saPriv = (drmI830Sarea *)
      (((GLubyte *)sPriv->pSAREA) + intelScreen->sarea_priv_offset);

   if (!_mesa_initialize_context(&intel->ctx,
                                 mesaVis, shareCtx,
                                 functions,
                                 (void*) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;

   /* DRI state */
   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = (drmLock *) &sPriv->pSAREA->lock;

   intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))
      return GL_FALSE;

   driParseConfigFiles (&intel->optionCache, &intelScreen->optionCache,
                        intel->driScreen->myNum, "i965");

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;
   }

   if (intel->strict_conformance) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* Reinitialize the context point state.  It depends on the constants in
    * __GLcontextRec::Const set above.
    */
   _mesa_init_point(ctx);

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );

   TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   switch (mesaVis->depthBits) {
   case 0:  /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0/0xffff;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0/0xffffff; /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   /* Initialize swrast, tnl driver tables: */
   intelInitSpanFuncs( ctx );

   if (!intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active. Exiting\n");
      exit(1);
   }
   intelInitExtensions(ctx, GL_TRUE);

   INTEL_DEBUG = driParseDebugString( getenv( "INTEL_DEBUG" ),
                                      debug_control );
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   intel_recreate_static_regions(intel);

   intel_bufferobj_init( intel );
   intel_fbo_init( intel );

   intel->batch = intel_batchbuffer_alloc( intel );
   intel->last_swap_fence = NULL;
   intel->first_swap_fence = NULL;

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
      _mesa_enable_extension( ctx, "GL_S3_s3tc" );
   }
   else if (driQueryOptionb (&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
   }

   /* driInitTextureObjects( ctx, & intel->swapped, */
   /*                        DRI_TEXMGR_DO_TEXTURE_1D | */
   /*                        DRI_TEXMGR_DO_TEXTURE_2D | */
   /*                        DRI_TEXMGR_DO_TEXTURE_RECT ); */

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}

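/**
 * Tears a context down: destroys the hardware-specific state via the vtbl,
 * the helper modules, the batchbuffer and any outstanding swap fences, then
 * frees the Mesa context itself.
 */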
void intelDestroyContext(__DRIcontextPrivate *driContextPriv)
{
   struct intel_context *intel = (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel); /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      intel->vtbl.destroy( intel );

      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext (&intel->ctx);
      _tnl_DestroyContext (&intel->ctx);
      _vbo_DestroyContext (&intel->ctx);

      _swrast_DestroyContext (&intel->ctx);
      intel->Fallback = 0; /* don't call _swrast_Flush later */
      intel_batchbuffer_free(intel->batch);
      intel->batch = NULL;

      if (intel->last_swap_fence) {
         dri_fence_wait(intel->last_swap_fence);
         dri_fence_unreference(intel->last_swap_fence);
         intel->last_swap_fence = NULL;
      }
      if (intel->first_swap_fence) {
         dri_fence_wait(intel->first_swap_fence);
         dri_fence_unreference(intel->first_swap_fence);
         intel->first_swap_fence = NULL;
      }

      if ( release_texture_heaps ) {
         /* This share group is about to go away, free our private
          * texture object data.
          */

         /* XXX: destroy the shared bufmgr struct here?
          */
      }

      /* free the Mesa context */
      intel->ctx.VertexProgram.Current = NULL;
      intel->ctx.FragmentProgram.Current = NULL;
      _mesa_destroy_context(&intel->ctx);
   }

   driContextPriv->driverPrivate = NULL;
}

GLboolean intelUnbindContext(__DRIcontextPrivate *driContextPriv)
{
   return GL_TRUE;
}

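/**
 * Makes the given context current for the calling thread, attaching it to
 * the draw/read drawables and (as the temporary FBO fix-up below notes)
 * pointing the window-system renderbuffers at the context's static regions.
 */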
GLboolean intelMakeCurrent(__DRIcontextPrivate *driContextPriv,
                           __DRIdrawablePrivate *driDrawPriv,
                           __DRIdrawablePrivate *driReadPriv)
{

   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
         (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;

      /* XXX FBO temporary fix-ups! */
      /* if the renderbuffers don't have regions, init them from the context.
       * They will be unreferenced when the renderbuffer is destroyed.
       */
      {
         struct intel_renderbuffer *irbDepth
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         struct intel_renderbuffer *irbStencil
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

         if (intel_fb->color_rb[0]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[0],
                                          intel->front_region);
         }
         if (intel_fb->color_rb[1]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[1],
                                          intel->back_region);
         }
         if (intel_fb->color_rb[2]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[2],
                                          intel->third_region);
         }
         if (irbDepth) {
            intel_renderbuffer_set_region(irbDepth, intel->depth_region);
         }
         if (irbStencil) {
            intel_renderbuffer_set_region(irbStencil, intel->depth_region);
         }
      }

      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (intel->driReadDrawable != driReadPriv) {
         intel->driReadDrawable = driReadPriv;
      }

      if (intel->driDrawable != driDrawPriv) {
         if (driDrawPriv->swap_interval == (unsigned)-1) {
            driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
               ? driGetDefaultVBlankFlags(&intel->optionCache)
               : VBLANK_FLAG_NO_IRQ;
            driDrawableInitVBlank( driDrawPriv );
         }

         intel->driDrawable = driDrawPriv;
         intelWindowMoved( intel );
         /* Shouldn't the readbuffer be stored also? */
      }

      _mesa_make_current(&intel->ctx,
                         &intel_fb->Base,
                         readFb);

      intel_draw_buffer(&intel->ctx, &intel_fb->Base);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}


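/**
 * Called from LOCK_HARDWARE when the heavyweight hardware lock was
 * contended: take the DRM lock, revalidate the drawable, recover from a
 * lost context or lost texture memory, and notice window moves.
 */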
static void intelContendedLock( struct intel_context *intel, GLuint flags )
{
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   volatile drmI830Sarea *sarea = intel->sarea;
   int me = intel->hHWContext;

   drmGetLock(intel->driFd, intel->hHWContext, flags);

   /* If the window moved, may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call:
    */
   if (dPriv)
      DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);

   intel->locked = 1;

   /* Lost context?
    */
   if (sarea->ctxOwner != me) {
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
         fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
                 sarea->ctxOwner, me);
      }
      sarea->ctxOwner = me;
   }

   /* If the last consumer of the texture memory wasn't us, notify the fake
    * bufmgr and record the new owner.  We should have the memory shared
    * between contexts of a single fake bufmgr, but this will at least make
    * things correct for now.
    */
   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
      sarea->texAge = intel->hHWContext;
      dri_bufmgr_fake_contended_lock_take(intel->bufmgr);
      if (INTEL_DEBUG & DEBUG_BATCH)
         intel_decode_context_reset();
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
         fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
                 sarea->ctxOwner, intel->hHWContext);
      }
   }

   /* Drawable changed?
    */
   if (dPriv && intel->lastStamp != dPriv->lastStamp) {
      intelWindowMoved( intel );
      intel->lastStamp = dPriv->lastStamp;
   }
}

_glthread_DECLARE_STATIC_MUTEX(lockMutex);

/* Lock the hardware and validate our state.
 */
void LOCK_HARDWARE( struct intel_context *intel )
{
   char __ret = 0;

   _glthread_LOCK_MUTEX(lockMutex);
   assert(!intel->locked);

   DRM_CAS(intel->driHwLock, intel->hHWContext,
           (DRM_LOCK_HELD|intel->hHWContext), __ret);
   if (__ret)
      intelContendedLock( intel, 0 );

   intel->locked = 1;
}


/* Unlock the hardware using the global current context
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   intel->vtbl.note_unlock( intel );
   intel->locked = 0;

   DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);
   _glthread_UNLOCK_MUTEX(lockMutex);

   /**
    * Nothing should be left in batch outside of LOCK/UNLOCK which references
    * cliprects.
    */
   assert(intel->batch->cliprect_mode != REFERENCES_CLIPRECTS);
}