/*
 * [965] Actually enable SGIS_generate_mipmap.
 * mesa.git / src / mesa / drivers / dri / i965 / intel_context.c
 */
1 /**************************************************************************
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 #include "glheader.h"
30 #include "context.h"
31 #include "matrix.h"
32 #include "simple_list.h"
33 #include "extensions.h"
34 #include "framebuffer.h"
35 #include "imports.h"
36 #include "points.h"
37
38 #include "swrast/swrast.h"
39 #include "swrast_setup/swrast_setup.h"
40 #include "tnl/tnl.h"
41 #include "vbo/vbo.h"
42
43 #include "tnl/t_pipeline.h"
44 #include "tnl/t_vertex.h"
45
46 #include "drivers/common/driverfuncs.h"
47
48 #include "intel_screen.h"
49 #include "intel_chipset.h"
50
51 #include "i830_dri.h"
52 #include "i830_common.h"
53
54 #include "intel_tex.h"
55 #include "intel_span.h"
56 #include "intel_ioctl.h"
57 #include "intel_batchbuffer.h"
58 #include "intel_blit.h"
59 #include "intel_regions.h"
60 #include "intel_buffer_objects.h"
61 #include "intel_decode.h"
62 #include "intel_bufmgr_ttm.h"
63
64 #include "i915_drm.h"
65
66 #include "utils.h"
67 #include "vblank.h"
#ifndef INTEL_DEBUG
/* Bitmask of debug-output categories (see debug_control[] below).
 * Populated from the INTEL_DEBUG environment variable in
 * intelInitContext() via driParseDebugString().
 */
int INTEL_DEBUG = (0);
#endif
71
72 #define need_GL_NV_point_sprite
73 #define need_GL_ARB_multisample
74 #define need_GL_ARB_point_parameters
75 #define need_GL_ARB_texture_compression
76 #define need_GL_ARB_vertex_buffer_object
77 #define need_GL_ARB_vertex_program
78 #define need_GL_ARB_window_pos
79 #define need_GL_ARB_occlusion_query
80 #define need_GL_EXT_blend_color
81 #define need_GL_EXT_blend_equation_separate
82 #define need_GL_EXT_blend_func_separate
83 #define need_GL_EXT_blend_minmax
84 #define need_GL_EXT_cull_vertex
85 #define need_GL_EXT_fog_coord
86 #define need_GL_EXT_multi_draw_arrays
87 #define need_GL_EXT_secondary_color
88 #define need_GL_EXT_point_parameters
89 #define need_GL_VERSION_2_0
90 #define need_GL_VERSION_2_1
91 #define need_GL_ARB_shader_objects
92 #define need_GL_ARB_vertex_shader
93
94 #include "extension_helper.h"
95
#ifndef VERBOSE
/* Legacy verbosity flag; not referenced anywhere in this file —
 * presumably consumed elsewhere in the driver (TODO confirm).
 */
int VERBOSE = 0;
#endif
99
100 /***************************************
101 * Mesa's Driver Functions
102 ***************************************/
103
104 #define DRIVER_VERSION "4.1.3002"
105
106 static const GLubyte *intelGetString( GLcontext *ctx, GLenum name )
107 {
108 const char * chipset;
109 static char buffer[128];
110
111 switch (name) {
112 case GL_VENDOR:
113 return (GLubyte *)"Tungsten Graphics, Inc";
114 break;
115
116 case GL_RENDERER:
117 switch (intel_context(ctx)->intelScreen->deviceID) {
118 case PCI_CHIP_I965_Q:
119 chipset = "Intel(R) 965Q";
120 break;
121 case PCI_CHIP_I965_G:
122 case PCI_CHIP_I965_G_1:
123 chipset = "Intel(R) 965G";
124 break;
125 case PCI_CHIP_I946_GZ:
126 chipset = "Intel(R) 946GZ";
127 break;
128 case PCI_CHIP_I965_GM:
129 chipset = "Intel(R) 965GM";
130 break;
131 case PCI_CHIP_I965_GME:
132 chipset = "Intel(R) 965GME/GLE";
133 break;
134 default:
135 chipset = "Unknown Intel Chipset";
136 }
137
138 (void) driGetRendererString( buffer, chipset, DRIVER_VERSION, 0 );
139 return (GLubyte *) buffer;
140
141 default:
142 return NULL;
143 }
144 }
145
146
147 /**
148 * Extension strings exported by the intel driver.
149 *
150 * \note
151 * It appears that ARB_texture_env_crossbar has "disappeared" compared to the
152 * old i830-specific driver.
153 */
154 const struct dri_extension card_extensions[] =
155 {
156 { "GL_ARB_multisample", GL_ARB_multisample_functions },
157 { "GL_ARB_multitexture", NULL },
158 { "GL_ARB_point_parameters", GL_ARB_point_parameters_functions },
159 { "GL_NV_point_sprite", GL_NV_point_sprite_functions },
160 { "GL_ARB_texture_border_clamp", NULL },
161 { "GL_ARB_texture_compression", GL_ARB_texture_compression_functions },
162 { "GL_ARB_texture_cube_map", NULL },
163 { "GL_ARB_texture_env_add", NULL },
164 { "GL_ARB_texture_env_combine", NULL },
165 { "GL_ARB_texture_env_dot3", NULL },
166 { "GL_ARB_texture_mirrored_repeat", NULL },
167 { "GL_ARB_texture_non_power_of_two", NULL },
168 { "GL_ARB_texture_rectangle", NULL },
169 { "GL_NV_texture_rectangle", NULL },
170 { "GL_EXT_texture_rectangle", NULL },
171 { "GL_ARB_texture_rectangle", NULL },
172 { "GL_ARB_point_sprite", NULL},
173 { "GL_ARB_point_parameters", NULL },
174 { "GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions },
175 { "GL_ARB_vertex_program", GL_ARB_vertex_program_functions },
176 { "GL_ARB_window_pos", GL_ARB_window_pos_functions },
177 { "GL_EXT_blend_color", GL_EXT_blend_color_functions },
178 { "GL_EXT_blend_equation_separate", GL_EXT_blend_equation_separate_functions },
179 { "GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions },
180 { "GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions },
181 { "GL_EXT_blend_logic_op", NULL },
182 { "GL_EXT_blend_subtract", NULL },
183 { "GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions },
184 { "GL_EXT_fog_coord", GL_EXT_fog_coord_functions },
185 { "GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions },
186 { "GL_EXT_secondary_color", GL_EXT_secondary_color_functions },
187 { "GL_EXT_stencil_wrap", NULL },
188 { "GL_EXT_texture_edge_clamp", NULL },
189 { "GL_EXT_texture_env_combine", NULL },
190 { "GL_EXT_texture_env_dot3", NULL },
191 { "GL_EXT_texture_filter_anisotropic", NULL },
192 { "GL_EXT_texture_lod_bias", NULL },
193 { "GL_EXT_texture_sRGB", NULL },
194 { "GL_3DFX_texture_compression_FXT1", NULL },
195 { "GL_APPLE_client_storage", NULL },
196 { "GL_MESA_pack_invert", NULL },
197 { "GL_MESA_ycbcr_texture", NULL },
198 { "GL_NV_blend_square", NULL },
199 { "GL_SGIS_generate_mipmap", NULL },
200 { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions},
201 { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions},
202 { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions},
203 { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions},
204 { "GL_ARB_fragment_shader", NULL },
205 /* XXX not implement yet, to compile builtin glsl lib */
206 { "GL_ARB_draw_buffers", NULL },
207 { NULL, NULL }
208 };
209
/* Extensions enabled only when the TTM memory manager is in use (or when
 * building the "potential" list with ctx == NULL); see intelInitExtensions().
 */
const struct dri_extension ttm_extensions[] = {
    {"GL_ARB_pixel_buffer_object", NULL},
    {NULL, NULL}
};
214
/* Enabled separately in intelInitExtensions(): requires DRM minor >= 8. */
const struct dri_extension arb_oc_extension =
    { "GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions};
217
218 /**
219 * Initializes potential list of extensions if ctx == NULL, or actually enables
220 * extensions for a context.
221 */
222 void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
223 {
224 struct intel_context *intel = ctx?intel_context(ctx):NULL;
225
226 /* Disable imaging extension until convolution is working in teximage paths.
227 */
228 enable_imaging = GL_FALSE;
229
230 driInitExtensions(ctx, card_extensions, enable_imaging);
231
232 if (intel == NULL || intel->ttm)
233 driInitExtensions(ctx, ttm_extensions, GL_FALSE);
234
235 if (intel == NULL || intel->intelScreen->drmMinor >= 8)
236 driInitSingleExtension(ctx, &arb_oc_extension);
237 }
238
/* Token -> flag-bit table for the INTEL_DEBUG environment variable,
 * consumed by driParseDebugString() in intelInitContext().
 * Note: "sing" and "thre" both map to DEBUG_SINGLE_THREAD — apparently
 * intentional aliases.
 */
static const struct dri_debug_control debug_control[] =
{
    { "fall",  DEBUG_FALLBACKS },
    { "tex",   DEBUG_TEXTURE },
    { "ioctl", DEBUG_IOCTL },
    { "prim",  DEBUG_PRIMS },
    { "vert",  DEBUG_VERTS },
    { "state", DEBUG_STATE },
    { "verb",  DEBUG_VERBOSE },
    { "dri",   DEBUG_DRI },
    { "dma",   DEBUG_DMA },
    { "san",   DEBUG_SANITY },
    { "sync",  DEBUG_SYNC },
    { "sleep", DEBUG_SLEEP },
    { "pix",   DEBUG_PIXEL },
    { "buf",   DEBUG_BUFMGR },
    { "stats", DEBUG_STATS },
    { "tile",  DEBUG_TILE },
    { "sing",  DEBUG_SINGLE_THREAD },
    { "thre",  DEBUG_SINGLE_THREAD },
    { "wm",    DEBUG_WM },
    { "vs",    DEBUG_VS },
    { "bat",   DEBUG_BATCH },
    { "blit",  DEBUG_BLIT},
    { "mip",   DEBUG_MIPTREE},
    { "reg",   DEBUG_REGION},
    { NULL,    0 }
};
267
268
/* Mesa UpdateState hook: propagate a state-change notification to the
 * software rasterizer/TNL helper modules and accumulate the dirty bits
 * for the hardware back-end.
 */
static void intelInvalidateState( GLcontext *ctx, GLuint new_state )
{
   struct intel_context *intel = intel_context(ctx);

   /* Let each helper module invalidate its derived state. */
   _swrast_InvalidateState( ctx, new_state );
   _swsetup_InvalidateState( ctx, new_state );
   _vbo_InvalidateState( ctx, new_state );
   _tnl_InvalidateState( ctx, new_state );
   _tnl_invalidate_vertex_state( ctx, new_state );

   /* Accumulate dirty bits; consumed later by the device-specific code. */
   intel->NewGLState |= new_state;

   /* Give the chip-specific layer a chance to react, if it installed a
    * hook.
    */
   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}
284
285
286 void intelFlush( GLcontext *ctx )
287 {
288 struct intel_context *intel = intel_context( ctx );
289
290 if (intel->batch->map != intel->batch->ptr)
291 intel_batchbuffer_flush(intel->batch);
292 }
293
294 void intelFinish( GLcontext *ctx )
295 {
296 struct intel_context *intel = intel_context( ctx );
297
298 intelFlush(ctx);
299 if (intel->batch->last_fence) {
300 dri_fence_wait(intel->batch->last_fence);
301 dri_fence_unreference(intel->batch->last_fence);
302 intel->batch->last_fence = NULL;
303 }
304 }
305
/* Mesa BeginQuery hook (GL_ARB_occlusion_query): snapshot the hardware
 * PS_DEPTH_COUNT register into q->Result; intelEndQuery() later subtracts
 * this starting value from a second snapshot.
 */
static void
intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   drmI830MMIO io = {
      .read_write = MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &q->Result
   };
   /* Non-zero stats_wm presumably keeps statistics enabled in the WM
    * while a query is active — TODO confirm against brw_wm state code.
    */
   intel->stats_wm++;
   /* Drain outstanding rendering so the counter reflects only work
    * issued inside the query.
    */
   intelFinish(&intel->ctx);
   /* NOTE(review): drmCommandWrite's return value is ignored here. */
   drmCommandWrite(intel->driFd, DRM_I830_MMIO, &io, sizeof(io));
}
319
/* Mesa EndQuery hook: take a second PS_DEPTH_COUNT snapshot and store
 * the delta (samples passed during the query) in q->Result.
 */
static void
intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   GLuint64EXT tmp;
   drmI830MMIO io = {
      .read_write = MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &tmp
   };
   /* Drain rendering so all query-scope fragments are counted. */
   intelFinish(&intel->ctx);
   /* NOTE(review): drmCommandWrite's return value is ignored here. */
   drmCommandWrite(intel->driFd, DRM_I830_MMIO, &io, sizeof(io));
   /* q->Result held the starting counter value from intelBeginQuery(). */
   q->Result = tmp - q->Result;
   q->Ready = GL_TRUE;
   intel->stats_wm--;
}
336
/** Driver-specific fence emit implementation for the fake memory manager.
 *
 * \param private  actually a struct intel_context pointer.
 * \return the IRQ cookie identifying the emitted fence.
 */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *)private;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */
   return intelEmitIrqLocked(intel);
}
352
/** Driver-specific fence wait implementation for the fake memory manager.
 *
 * \param private  actually a struct intel_context pointer.
 * \param cookie   IRQ cookie returned by intel_fence_emit().
 * \return always 0.
 */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   intelWaitIrq((struct intel_context *)private, cookie);
   return 0;
}
363
364 static GLboolean
365 intel_init_bufmgr(struct intel_context *intel)
366 {
367 intelScreenPrivate *intelScreen = intel->intelScreen;
368 GLboolean ttm_disable = getenv("INTEL_NO_TTM") != NULL;
369
370 /* If we've got a new enough DDX that's initializing TTM and giving us
371 * object handles for the shared buffers, use that.
372 */
373 intel->ttm = GL_FALSE;
374 if (!ttm_disable &&
375 intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
376 intel->intelScreen->drmMinor >= 11 &&
377 intel->intelScreen->front.bo_handle != -1)
378 {
379 intel->bufmgr = intel_bufmgr_ttm_init(intel->driFd,
380 DRM_FENCE_TYPE_EXE,
381 DRM_FENCE_TYPE_EXE |
382 DRM_I915_FENCE_TYPE_RW,
383 BATCH_SZ);
384 if (intel->bufmgr != NULL)
385 intel->ttm = GL_TRUE;
386 }
387 /* Otherwise, use the classic buffer manager. */
388 if (intel->bufmgr == NULL) {
389 if (ttm_disable) {
390 fprintf(stderr, "TTM buffer manager disabled. Using classic.\n");
391 } else {
392 fprintf(stderr, "Failed to initialize TTM buffer manager. "
393 "Falling back to classic.\n");
394 }
395
396 if (intelScreen->tex.size == 0) {
397 fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
398 __func__, __LINE__);
399 return GL_FALSE;
400 }
401
402 intel->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
403 intelScreen->tex.map,
404 intelScreen->tex.size,
405 intel_fence_emit,
406 intel_fence_wait,
407 intel);
408 }
409
410 return GL_TRUE;
411 }
412
413
414 void intelInitDriverFunctions( struct dd_function_table *functions )
415 {
416 _mesa_init_driver_functions( functions );
417
418 functions->Flush = intelFlush;
419 functions->Finish = intelFinish;
420 functions->GetString = intelGetString;
421 functions->UpdateState = intelInvalidateState;
422 functions->BeginQuery = intelBeginQuery;
423 functions->EndQuery = intelEndQuery;
424
425 /* CopyPixels can be accelerated even with the current memory
426 * manager:
427 */
428 if (!getenv("INTEL_NO_BLIT")) {
429 functions->CopyPixels = intelCopyPixels;
430 functions->Bitmap = intelBitmap;
431 }
432
433 intelInitTextureFuncs( functions );
434 intelInitStateFuncs( functions );
435 intelInitBufferFuncs( functions );
436 }
437
/**
 * Create and initialize the shared (i965-generic) part of an intel
 * rendering context.
 *
 * Sets up the Mesa core context, DRI/SAREA plumbing, buffer manager,
 * software-fallback modules (swrast/tnl/vbo), driver limits, and
 * extensions.  Called by the chip-specific context-creation code with a
 * partially-filled function table.
 *
 * \return GL_FALSE on any initialization failure (Mesa context or
 *         buffer manager could not be created).
 */
GLboolean intelInitContext( struct intel_context *intel,
                            const __GLcontextModes *mesaVis,
                            __DRIcontextPrivate *driContextPriv,
                            void *sharedContextPrivate,
                            struct dd_function_table *functions )
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *)sPriv->private;
   /* The private SAREA lives at an offset inside the shared SAREA page;
    * volatile because the X server / other clients update it.
    */
   volatile drmI830Sarea *saPriv = (drmI830Sarea *)
      (((GLubyte *)sPriv->pSAREA)+intelScreen->sarea_priv_offset);

   if (!_mesa_initialize_context(&intel->ctx,
                                 mesaVis, shareCtx,
                                 functions,
                                 (void*) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;

   /* Dri stuff */
   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = (drmLock *) &sPriv->pSAREA->lock;

   intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))
      return GL_FALSE;

   /* Load user/system driconf options for the "i965" driver. */
   driParseConfigFiles (&intel->optionCache, &intelScreen->optionCache,
                        intel->driScreen->myNum, "i965");

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;
   }

   /* Strict conformance pins line widths to exactly 1.0; otherwise a
    * wider (and coarser-grained) range is advertised.
    */
   if (intel->strict_conformance) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depend on constants in __GLcontextRec::Const
    */
   _mesa_init_point(ctx);

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );

   TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );

   /* Hardware stencil only works with a 24-bit depth buffer (S8Z24). */
   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   /* Scale factor for glPolygonOffset depends on depth-buffer precision. */
   switch(mesaVis->depthBits) {
   case 0: /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0/0xffff;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0/0xffffff; /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   /* Initialize swrast, tnl driver tables: */
   intelInitSpanFuncs( ctx );

   /* NOTE(review): hard exit — this driver cannot operate without IRQs. */
   if (!intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active. Exiting\n");
      exit(1);
   }
   intelInitExtensions(ctx, GL_TRUE);

   INTEL_DEBUG = driParseDebugString( getenv( "INTEL_DEBUG" ),
                                      debug_control );
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   intel_recreate_static_regions(intel);

   intel_bufferobj_init( intel );
   intel->batch = intel_batchbuffer_alloc( intel );
   intel->last_swap_fence = NULL;
   intel->first_swap_fence = NULL;

   /* S3TC: enabled when the decompression library is present, or forced
    * via driconf (compressed upload works without the library).
    */
   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
      _mesa_enable_extension( ctx, "GL_S3_s3tc" );
   }
   else if (driQueryOptionb (&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
   }

/*    driInitTextureObjects( ctx, & intel->swapped, */
/* 			  DRI_TEXMGR_DO_TEXTURE_1D | */
/* 			  DRI_TEXMGR_DO_TEXTURE_2D | */
/* 			  DRI_TEXMGR_DO_TEXTURE_RECT ); */

   /* Force all software fallbacks */
   if (getenv("INTEL_NO_RAST")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}
584
/**
 * Tear down an intel context: chip-specific state first, then the
 * helper modules, the batchbuffer, any outstanding swap fences, and
 * finally the Mesa core context.
 */
void intelDestroyContext(__DRIcontextPrivate *driContextPriv)
{
   struct intel_context *intel = (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel); /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;


      /* Chip-specific teardown (i965 state, programs, etc.). */
      intel->vtbl.destroy( intel );

      /* We own shared texture data iff we're the last context in the
       * share group.
       */
      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext (&intel->ctx);
      _tnl_DestroyContext (&intel->ctx);
      _vbo_DestroyContext (&intel->ctx);

      _swrast_DestroyContext (&intel->ctx);
      intel->Fallback = 0;	/* don't call _swrast_Flush later */
      intel_batchbuffer_free(intel->batch);
      intel->batch = NULL;

      /* Wait out and release any swap fences still pending. */
      if (intel->last_swap_fence) {
	 dri_fence_wait(intel->last_swap_fence);
	 dri_fence_unreference(intel->last_swap_fence);
	 intel->last_swap_fence = NULL;
      }
      if (intel->first_swap_fence) {
	 dri_fence_wait(intel->first_swap_fence);
	 dri_fence_unreference(intel->first_swap_fence);
	 intel->first_swap_fence = NULL;
      }

      if ( release_texture_heaps ) {
         /* This share group is about to go away, free our private
          * texture object data.
          */

	 /* XXX: destroy the shared bufmgr struct here?
	  */
      }

      /* Free the regions created to describe front/back/depth
       * buffers:
       */
#if 0
      intel_region_release(&intel->front_region);
      intel_region_release(&intel->back_region);
      intel_region_release(&intel->depth_region);
      intel_region_release(&intel->draw_region);
#endif

      /* free the Mesa context */
      intel->ctx.VertexProgram.Current = NULL;
      intel->ctx.FragmentProgram.Current = NULL;
      _mesa_destroy_context(&intel->ctx);
   }

   driContextPriv->driverPrivate = NULL;
}
644
645 GLboolean intelUnbindContext(__DRIcontextPrivate *driContextPriv)
646 {
647 return GL_TRUE;
648 }
649
/**
 * DRI MakeCurrent hook: bind the context to the given draw/read
 * drawables (or unbind everything when driContextPriv is NULL).
 */
GLboolean intelMakeCurrent(__DRIcontextPrivate *driContextPriv,
                           __DRIdrawablePrivate *driDrawPriv,
                           __DRIdrawablePrivate *driReadPriv)
{

   if (driContextPriv) {
      struct intel_context *intel = (struct intel_context *) driContextPriv->driverPrivate;

      if (intel->driReadDrawable != driReadPriv) {
	 intel->driReadDrawable = driReadPriv;
      }

      if ( intel->driDrawable != driDrawPriv ) {
	 /* swap_interval == -1 marks a drawable whose vblank state has
	  * not been initialized yet.
	  */
	 if (driDrawPriv->swap_interval == (unsigned)-1) {
	    driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
	       ? driGetDefaultVBlankFlags(&intel->optionCache)
	       : VBLANK_FLAG_NO_IRQ;
	    driDrawableInitVBlank( driDrawPriv );
	 }

	 intel->driDrawable = driDrawPriv;
	 intelWindowMoved( intel );
	 /* Shouldn't the readbuffer be stored also? */
      }

      _mesa_make_current(&intel->ctx,
			 (GLframebuffer *) driDrawPriv->driverPrivate,
			 (GLframebuffer *) driReadPriv->driverPrivate);

      /* Re-run the draw-buffer logic so hardware targets the right buffer. */
      intel->ctx.Driver.DrawBuffer( &intel->ctx, intel->ctx.Color.DrawBuffer[0] );
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}
686
687
688 static void intelContendedLock( struct intel_context *intel, GLuint flags )
689 {
690 __DRIdrawablePrivate *dPriv = intel->driDrawable;
691 __DRIscreenPrivate *sPriv = intel->driScreen;
692 volatile drmI830Sarea * sarea = intel->sarea;
693 int me = intel->hHWContext;
694
695 drmGetLock(intel->driFd, intel->hHWContext, flags);
696
697 /* If the window moved, may need to set a new cliprect now.
698 *
699 * NOTE: This releases and regains the hw lock, so all state
700 * checking must be done *after* this call:
701 */
702 if (dPriv)
703 DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);
704
705
706 intel->locked = 1;
707 intel->need_flush = 1;
708
709 /* Lost context?
710 */
711 if (sarea->ctxOwner != me) {
712 if (INTEL_DEBUG & DEBUG_BUFMGR) {
713 fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
714 sarea->ctxOwner, me);
715 }
716 sarea->ctxOwner = me;
717 intel->vtbl.lost_hardware( intel );
718 }
719
720 /* If the last consumer of the texture memory wasn't us, notify the fake
721 * bufmgr and record the new owner. We should have the memory shared
722 * between contexts of a single fake bufmgr, but this will at least make
723 * things correct for now.
724 */
725 if (!intel->ttm && sarea->texAge != intel->hHWContext) {
726 sarea->texAge = intel->hHWContext;
727 dri_bufmgr_fake_contended_lock_take(intel->bufmgr);
728 if (INTEL_DEBUG & DEBUG_BATCH)
729 intel_decode_context_reset();
730 if (INTEL_DEBUG & DEBUG_BUFMGR) {
731 fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
732 sarea->ctxOwner, intel->hHWContext);
733 }
734 }
735
736 /* Drawable changed?
737 */
738 if (dPriv && intel->lastStamp != dPriv->lastStamp) {
739 intelWindowMoved( intel );
740 intel->lastStamp = dPriv->lastStamp;
741 }
742 }
743
/* Serializes LOCK_HARDWARE/UNLOCK_HARDWARE across threads in this process. */
_glthread_DECLARE_STATIC_MUTEX(lockMutex);
745
746 /* Lock the hardware and validate our state.
747 */
748 void LOCK_HARDWARE( struct intel_context *intel )
749 {
750 char __ret=0;
751
752 _glthread_LOCK_MUTEX(lockMutex);
753 assert(!intel->locked);
754
755
756 DRM_CAS(intel->driHwLock, intel->hHWContext,
757 (DRM_LOCK_HELD|intel->hHWContext), __ret);
758 if (__ret)
759 intelContendedLock( intel, 0 );
760
761 intel->locked = 1;
762
763 }
764
765
/* Unlock the hardware using the global current context
 *
 * Notifies the chip-specific layer before the lock is actually dropped,
 * then releases the DRM lock and the per-process mutex.
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   intel->vtbl.note_unlock( intel );
   intel->locked = 0;

   DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);
   _glthread_UNLOCK_MUTEX(lockMutex);
}
776
777