Merge {i915,i965}/intel_context.h as intel/intel_context.h
mesa.git: src/mesa/drivers/dri/i965/intel_context.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "glheader.h"
#include "context.h"
#include "matrix.h"
#include "simple_list.h"
#include "extensions.h"
#include "framebuffer.h"
#include "imports.h"
#include "points.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "vbo/vbo.h"

#include "tnl/t_pipeline.h"
#include "tnl/t_vertex.h"

#include "drivers/common/driverfuncs.h"

#include "intel_screen.h"
#include "intel_chipset.h"

#include "i830_dri.h"

#include "intel_tex.h"
#include "intel_span.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_regions.h"
#include "intel_buffers.h"
#include "intel_buffer_objects.h"
#include "intel_decode.h"
#include "intel_fbo.h"
#include "intel_bufmgr_ttm.h"
#include "intel_pixel.h"

#include "drirenderbuffer.h"
#include "i915_drm.h"

#include "utils.h"
#include "vblank.h"
#ifndef INTEL_DEBUG
int INTEL_DEBUG = (0);
#endif

#define need_GL_NV_point_sprite
#define need_GL_ARB_multisample
#define need_GL_ARB_point_parameters
#define need_GL_ARB_texture_compression
#define need_GL_ARB_vertex_buffer_object
#define need_GL_ARB_vertex_program
#define need_GL_ARB_window_pos
#define need_GL_ARB_occlusion_query
#define need_GL_EXT_blend_color
#define need_GL_EXT_blend_equation_separate
#define need_GL_EXT_blend_func_separate
#define need_GL_EXT_blend_minmax
#define need_GL_EXT_cull_vertex
#define need_GL_EXT_fog_coord
#define need_GL_EXT_framebuffer_object
#define need_GL_EXT_multi_draw_arrays
#define need_GL_EXT_secondary_color
#define need_GL_ATI_separate_stencil
#define need_GL_EXT_point_parameters
#define need_GL_VERSION_2_0
#define need_GL_VERSION_2_1
#define need_GL_ARB_shader_objects
#define need_GL_ARB_vertex_shader

#include "extension_helper.h"

#ifndef VERBOSE
int VERBOSE = 0;
#endif

/***************************************
 * Mesa's Driver Functions
 ***************************************/

#define DRIVER_VERSION "4.1.3002"

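/* Return the GL_VENDOR and GL_RENDERER strings; the renderer string is
 * assembled from the screen's PCI device ID plus DRIVER_VERSION.
 */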
static const GLubyte *intelGetString( GLcontext *ctx, GLenum name )
{
   const char * chipset;
   static char buffer[128];

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *)"Tungsten Graphics, Inc";
      break;

   case GL_RENDERER:
      switch (intel_context(ctx)->intelScreen->deviceID) {
      case PCI_CHIP_I965_Q:
         chipset = "Intel(R) 965Q";
         break;
      case PCI_CHIP_I965_G:
      case PCI_CHIP_I965_G_1:
         chipset = "Intel(R) 965G";
         break;
      case PCI_CHIP_I946_GZ:
         chipset = "Intel(R) 946GZ";
         break;
      case PCI_CHIP_I965_GM:
         chipset = "Intel(R) 965GM";
         break;
      case PCI_CHIP_I965_GME:
         chipset = "Intel(R) 965GME/GLE";
         break;
      case PCI_CHIP_IGD_GM:
         chipset = "Intel(R) Integrated Graphics Device";
         break;
      default:
         chipset = "Unknown Intel Chipset";
      }

      (void) driGetRendererString( buffer, chipset, DRIVER_VERSION, 0 );
      return (GLubyte *) buffer;

   default:
      return NULL;
   }
}


/**
 * Extension strings exported by the intel driver.
 *
 * \note
 * It appears that ARB_texture_env_crossbar has "disappeared" compared to the
 * old i830-specific driver.
 */
const struct dri_extension card_extensions[] =
{
   { "GL_ARB_multisample", GL_ARB_multisample_functions },
   { "GL_ARB_multitexture", NULL },
   { "GL_ARB_point_parameters", GL_ARB_point_parameters_functions },
   { "GL_NV_point_sprite", GL_NV_point_sprite_functions },
   { "GL_ARB_texture_border_clamp", NULL },
   { "GL_ARB_texture_compression", GL_ARB_texture_compression_functions },
   { "GL_ARB_texture_cube_map", NULL },
   { "GL_ARB_texture_env_add", NULL },
   { "GL_ARB_texture_env_combine", NULL },
   { "GL_ARB_texture_env_dot3", NULL },
   { "GL_ARB_texture_mirrored_repeat", NULL },
   { "GL_ARB_texture_non_power_of_two", NULL },
   { "GL_ARB_texture_rectangle", NULL },
   { "GL_NV_texture_rectangle", NULL },
   { "GL_EXT_texture_rectangle", NULL },
   { "GL_ARB_texture_rectangle", NULL },
   { "GL_ARB_point_sprite", NULL},
   { "GL_ARB_point_parameters", NULL },
   { "GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions },
   { "GL_ARB_vertex_program", GL_ARB_vertex_program_functions },
   { "GL_ARB_window_pos", GL_ARB_window_pos_functions },
   { "GL_EXT_blend_color", GL_EXT_blend_color_functions },
   { "GL_EXT_blend_equation_separate", GL_EXT_blend_equation_separate_functions },
   { "GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions },
   { "GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions },
   { "GL_EXT_blend_logic_op", NULL },
   { "GL_EXT_blend_subtract", NULL },
   { "GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions },
   { "GL_EXT_fog_coord", GL_EXT_fog_coord_functions },
   { "GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions },
   { "GL_EXT_secondary_color", GL_EXT_secondary_color_functions },
   { "GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions },
   { "GL_EXT_stencil_wrap", NULL },
   /* Do not enable this extension.  It conflicts with GL_ATI_separate_stencil
    * and 2.0's separate stencil, because mesa's computed _TestTwoSide will
    * only reflect whether it's enabled through this extension, even if the
    * application is using the other interfaces.
    */
   /*{ "GL_EXT_stencil_two_side", GL_EXT_stencil_two_side_functions },*/
   { "GL_EXT_texture_edge_clamp", NULL },
   { "GL_EXT_texture_env_combine", NULL },
   { "GL_EXT_texture_env_dot3", NULL },
   { "GL_EXT_texture_filter_anisotropic", NULL },
   { "GL_EXT_texture_lod_bias", NULL },
   { "GL_EXT_texture_sRGB", NULL },
   { "GL_3DFX_texture_compression_FXT1", NULL },
   { "GL_APPLE_client_storage", NULL },
   { "GL_MESA_pack_invert", NULL },
   { "GL_MESA_ycbcr_texture", NULL },
   { "GL_NV_blend_square", NULL },
   { "GL_SGIS_generate_mipmap", NULL },
   { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions},
   { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions},
   { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions},
   { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions},
   { "GL_ARB_fragment_shader", NULL },
   { "GL_ARB_draw_buffers", NULL },
   { NULL, NULL }
};

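/* Extensions that are only advertised when the TTM buffer manager is in use
 * (see intelInitExtensions below).
 */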
const struct dri_extension ttm_extensions[] = {
   {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
   {"GL_ARB_pixel_buffer_object", NULL},
   {NULL, NULL}
};

const struct dri_extension arb_oc_extensions[] = {
   {"GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions},
   {NULL, NULL}
};

/**
 * Initializes potential list of extensions if ctx == NULL, or actually enables
 * extensions for a context.
 */
static void intelInitExtensions(GLcontext *ctx, GLboolean enable_imaging)
{
   struct intel_context *intel = ctx ? intel_context(ctx) : NULL;

   /* Disable imaging extension until convolution is working in teximage paths.
    */
   enable_imaging = GL_FALSE;

   driInitExtensions(ctx, card_extensions, enable_imaging);

   if (intel == NULL || intel->ttm)
      driInitExtensions(ctx, ttm_extensions, GL_FALSE);

   if (intel == NULL || intel->intelScreen->drmMinor >= 8)
      driInitExtensions(ctx, arb_oc_extensions, GL_FALSE);
}

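/* Flags recognized in the INTEL_DEBUG environment variable, which is parsed
 * with driParseDebugString() in intelInitContext() below.
 */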
static const struct dri_debug_control debug_control[] =
{
   { "fall", DEBUG_FALLBACKS },
   { "tex", DEBUG_TEXTURE },
   { "ioctl", DEBUG_IOCTL },
   { "prim", DEBUG_PRIMS },
   { "vert", DEBUG_VERTS },
   { "state", DEBUG_STATE },
   { "verb", DEBUG_VERBOSE },
   { "dri", DEBUG_DRI },
   { "dma", DEBUG_DMA },
   { "san", DEBUG_SANITY },
   { "sync", DEBUG_SYNC },
   { "sleep", DEBUG_SLEEP },
   { "pix", DEBUG_PIXEL },
   { "buf", DEBUG_BUFMGR },
   { "stats", DEBUG_STATS },
   { "tile", DEBUG_TILE },
   { "sing", DEBUG_SINGLE_THREAD },
   { "thre", DEBUG_SINGLE_THREAD },
   { "wm", DEBUG_WM },
   { "vs", DEBUG_VS },
   { "bat", DEBUG_BATCH },
   { "blit", DEBUG_BLIT},
   { "mip", DEBUG_MIPTREE},
   { "reg", DEBUG_REGION},
   { "fbo", DEBUG_FBO },
   { NULL, 0 }
};


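/* Propagate state changes to the software rasterizer and TNL modules and
 * accumulate them in intel->NewGLState for the hardware state emit code.
 */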
static void intelInvalidateState( GLcontext *ctx, GLuint new_state )
{
   struct intel_context *intel = intel_context(ctx);

   _swrast_InvalidateState( ctx, new_state );
   _swsetup_InvalidateState( ctx, new_state );
   _vbo_InvalidateState( ctx, new_state );
   _tnl_InvalidateState( ctx, new_state );
   _tnl_invalidate_vertex_state( ctx, new_state );

   intel->NewGLState |= new_state;

   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}


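/* Submit any commands queued in the current batchbuffer to the hardware. */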
void intelFlush( GLcontext *ctx )
{
   struct intel_context *intel = intel_context( ctx );

   if (intel->batch->map != intel->batch->ptr)
      intel_batchbuffer_flush(intel->batch);
}

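/* Wait for all previously submitted rendering to complete: flush the batch
 * and then block on its last fence.
 */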
void intelFinish( GLcontext *ctx )
{
   struct intel_context *intel = intel_context( ctx );

   intelFlush(ctx);
   if (intel->batch->last_fence) {
      dri_fence_wait(intel->batch->last_fence);
      dri_fence_unreference(intel->batch->last_fence);
      intel->batch->last_fence = NULL;
   }
}

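/* ARB_occlusion_query support: after draining the pipeline with intelFinish(),
 * read the PS_DEPTH_COUNT statistics register through the DRM_I915_MMIO ioctl
 * at BeginQuery and again at EndQuery; the difference between the two reads is
 * reported as the query result.
 */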
static void
intelBeginQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &q->Result
   };
   intel->stats_wm++;
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
}

static void
intelEndQuery(GLcontext *ctx, GLenum target, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context( ctx );
   GLuint64EXT tmp;
   struct drm_i915_mmio io = {
      .read_write = I915_MMIO_READ,
      .reg = MMIO_REGS_PS_DEPTH_COUNT,
      .data = &tmp
   };
   intelFinish(&intel->ctx);
   drmCommandWrite(intel->driFd, DRM_I915_MMIO, &io, sizeof(io));
   q->Result = tmp - q->Result;
   q->Ready = GL_TRUE;
   intel->stats_wm--;
}

/** Driver-specific fence emit implementation for the fake memory manager. */
static unsigned int
intel_fence_emit(void *private)
{
   struct intel_context *intel = (struct intel_context *)private;
   unsigned int fence;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */

   fence = intelEmitIrqLocked(intel);

   return fence;
}

/** Driver-specific fence wait implementation for the fake memory manager. */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   struct intel_context *intel = (struct intel_context *)private;

   intelWaitIrq(intel, cookie);

   return 0;
}

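/* Choose a buffer manager: use the TTM-based manager when the DDX and DRM are
 * new enough and INTEL_NO_TTM is not set; otherwise fall back to the classic
 * ("fake") manager that carves buffers out of the static texture pool.
 */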
static GLboolean
intel_init_bufmgr(struct intel_context *intel)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;
   GLboolean ttm_disable = getenv("INTEL_NO_TTM") != NULL;

   /* If we've got a new enough DDX that's initializing TTM and giving us
    * object handles for the shared buffers, use that.
    */
   intel->ttm = GL_FALSE;
   if (!ttm_disable &&
       intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
       intel->intelScreen->drmMinor >= 11 &&
       intel->intelScreen->front.bo_handle != -1)
   {
      intel->bufmgr = intel_bufmgr_ttm_init(intel->driFd,
                                            DRM_FENCE_TYPE_EXE,
                                            DRM_FENCE_TYPE_EXE |
                                            DRM_I915_FENCE_TYPE_RW,
                                            BATCH_SZ);
      if (intel->bufmgr != NULL)
         intel->ttm = GL_TRUE;
   }
   /* Otherwise, use the classic buffer manager. */
   if (intel->bufmgr == NULL) {
      if (ttm_disable) {
         fprintf(stderr, "TTM buffer manager disabled. Using classic.\n");
      } else {
         fprintf(stderr, "Failed to initialize TTM buffer manager. "
                 "Falling back to classic.\n");
      }

      if (intelScreen->tex.size == 0) {
         fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
                 __func__, __LINE__);
         return GL_FALSE;
      }

      intel->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
                                           intelScreen->tex.map,
                                           intelScreen->tex.size,
                                           intel_fence_emit,
                                           intel_fence_wait,
                                           intel);
   }

   return GL_TRUE;
}


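/* Fill in the driver's dd_function_table entry points, starting from Mesa's
 * software defaults.
 */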
void intelInitDriverFunctions( struct dd_function_table *functions )
{
   _mesa_init_driver_functions( functions );

   functions->Flush = intelFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;
   functions->BeginQuery = intelBeginQuery;
   functions->EndQuery = intelEndQuery;

   /* CopyPixels can be accelerated even with the current memory
    * manager:
    */
   if (!getenv("INTEL_NO_BLIT")) {
      functions->CopyPixels = intelCopyPixels;
      functions->Bitmap = intelBitmap;
   }

   intelInitTextureFuncs( functions );
   intelInitStateFuncs( functions );
   intelInitBufferFuncs( functions );
}

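/* Common context creation for the i965 driver: set up the Mesa context, the
 * buffer manager, the swrast/tnl/vbo helper modules, implementation limits,
 * and the extension list.
 */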
GLboolean intelInitContext( struct intel_context *intel,
                            const __GLcontextModes *mesaVis,
                            __DRIcontextPrivate *driContextPriv,
                            void *sharedContextPrivate,
                            struct dd_function_table *functions )
{
   GLcontext *ctx = &intel->ctx;
   GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *)sPriv->private;
   volatile struct drm_i915_sarea *saPriv = (struct drm_i915_sarea *)
      (((GLubyte *)sPriv->pSAREA)+intelScreen->sarea_priv_offset);

   if (!_mesa_initialize_context(&intel->ctx,
                                 mesaVis, shareCtx,
                                 functions,
                                 (void*) intel)) {
      _mesa_printf("%s: failed to init mesa context\n", __FUNCTION__);
      return GL_FALSE;
   }

   driContextPriv->driverPrivate = intel;
   intel->intelScreen = intelScreen;
   intel->driScreen = sPriv;
   intel->sarea = saPriv;

   /* Dri stuff */
   intel->hHWContext = driContextPriv->hHWContext;
   intel->driFd = sPriv->fd;
   intel->driHwLock = (drmLock *) &sPriv->pSAREA->lock;

   intel->maxBatchSize = BATCH_SZ;

   if (!intel_init_bufmgr(intel))
      return GL_FALSE;

   driParseConfigFiles (&intel->optionCache, &intelScreen->optionCache,
                        intel->driScreen->myNum, "i965");

   ctx->Const.MaxTextureMaxAnisotropy = 2.0;

   if (getenv("INTEL_STRICT_CONFORMANCE")) {
      intel->strict_conformance = 1;
   }

   if (intel->strict_conformance) {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 1.0;
      ctx->Const.MaxLineWidthAA = 1.0;
      ctx->Const.LineWidthGranularity = 1.0;
   }
   else {
      ctx->Const.MinLineWidth = 1.0;
      ctx->Const.MinLineWidthAA = 1.0;
      ctx->Const.MaxLineWidth = 5.0;
      ctx->Const.MaxLineWidthAA = 5.0;
      ctx->Const.LineWidthGranularity = 0.5;
   }

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   /* reinitialize the context point state.
    * It depends on constants in __GLcontextRec::Const
    */
   _mesa_init_point(ctx);

   /* Initialize the software rasterizer and helper modules. */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );

   TNL_CONTEXT(ctx)->Driver.RunPipeline = _tnl_run_pipeline;

   /* Configure swrast to match hardware characteristics: */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   switch(mesaVis->depthBits) {
   case 0: /* what to do in this case? */
   case 16:
      intel->polygon_offset_scale = 1.0/0xffff;
      break;
   case 24:
      intel->polygon_offset_scale = 2.0/0xffffff; /* req'd to pass glean */
      break;
   default:
      assert(0);
      break;
   }

   /* Initialize swrast, tnl driver tables: */
   intelInitSpanFuncs( ctx );

   if (!intel->intelScreen->irq_active) {
      _mesa_printf("IRQs not active. Exiting\n");
      exit(1);
   }
   intelInitExtensions(ctx, GL_TRUE);

   INTEL_DEBUG = driParseDebugString( getenv( "INTEL_DEBUG" ),
                                      debug_control );
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);

   intel_recreate_static_regions(intel);

   intel_bufferobj_init( intel );
   intel_fbo_init( intel );

   intel->batch = intel_batchbuffer_alloc( intel );
   intel->last_swap_fence = NULL;
   intel->first_swap_fence = NULL;

   if (intel->ctx.Mesa_DXTn) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
      _mesa_enable_extension( ctx, "GL_S3_s3tc" );
   }
   else if (driQueryOptionb (&intel->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
   }

   /* driInitTextureObjects( ctx, & intel->swapped, */
   /*                        DRI_TEXMGR_DO_TEXTURE_1D | */
   /*                        DRI_TEXMGR_DO_TEXTURE_2D | */
   /*                        DRI_TEXMGR_DO_TEXTURE_RECT ); */

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   /* Disable all hardware rendering (skip emitting batches and fences/waits
    * to the kernel)
    */
   intel->no_hw = getenv("INTEL_NO_HW") != NULL;

   return GL_TRUE;
}

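/* Tear down a context: destroy the hardware-specific state through the vtbl,
 * shut down the swrast/tnl/vbo modules, free the batchbuffer and any
 * outstanding swap fences, then destroy the Mesa context itself.
 */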
void intelDestroyContext(__DRIcontextPrivate *driContextPriv)
{
   struct intel_context *intel = (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel); /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;


      intel->vtbl.destroy( intel );

      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext (&intel->ctx);
      _tnl_DestroyContext (&intel->ctx);
      _vbo_DestroyContext (&intel->ctx);

      _swrast_DestroyContext (&intel->ctx);
      intel->Fallback = 0; /* don't call _swrast_Flush later */
      intel_batchbuffer_free(intel->batch);
      intel->batch = NULL;

      if (intel->last_swap_fence) {
         dri_fence_wait(intel->last_swap_fence);
         dri_fence_unreference(intel->last_swap_fence);
         intel->last_swap_fence = NULL;
      }
      if (intel->first_swap_fence) {
         dri_fence_wait(intel->first_swap_fence);
         dri_fence_unreference(intel->first_swap_fence);
         intel->first_swap_fence = NULL;
      }

      if ( release_texture_heaps ) {
         /* This share group is about to go away, free our private
          * texture object data.
          */

         /* XXX: destroy the shared bufmgr struct here?
          */
      }

      /* free the Mesa context */
      intel->ctx.VertexProgram.Current = NULL;
      intel->ctx.FragmentProgram.Current = NULL;
      _mesa_destroy_context(&intel->ctx);
   }

   driContextPriv->driverPrivate = NULL;
}

GLboolean intelUnbindContext(__DRIcontextPrivate *driContextPriv)
{
   return GL_TRUE;
}

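/* Bind the context to the given draw and read drawables: patch up the
 * renderbuffer regions, resize the framebuffer if the window changed,
 * initialize vblank state for a new drawable, and make the context current
 * in Mesa.
 */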
GLboolean intelMakeCurrent(__DRIcontextPrivate *driContextPriv,
                           __DRIdrawablePrivate *driDrawPriv,
                           __DRIdrawablePrivate *driReadPriv)
{

   if (driContextPriv) {
      struct intel_context *intel =
         (struct intel_context *) driContextPriv->driverPrivate;
      struct intel_framebuffer *intel_fb =
         (struct intel_framebuffer *) driDrawPriv->driverPrivate;
      GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;

      /* XXX FBO temporary fix-ups! */
      /* if the renderbuffers don't have regions, init them from the context.
       * They will be unreferenced when the renderbuffer is destroyed.
       */
      {
         struct intel_renderbuffer *irbDepth
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_DEPTH);
         struct intel_renderbuffer *irbStencil
            = intel_get_renderbuffer(&intel_fb->Base, BUFFER_STENCIL);

         if (intel_fb->color_rb[0]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[0],
                                          intel->front_region);
         }
         if (intel_fb->color_rb[1]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[1],
                                          intel->back_region);
         }

#if 0
         if (intel_fb->color_rb[2]) {
            intel_renderbuffer_set_region(intel_fb->color_rb[2],
                                          intel->third_region);
         }
#endif
         if (irbDepth) {
            intel_renderbuffer_set_region(irbDepth, intel->depth_region);
         }
         if (irbStencil) {
            intel_renderbuffer_set_region(irbStencil, intel->depth_region);
         }
      }

      /* set GLframebuffer size to match window, if needed */
      driUpdateFramebufferSize(&intel->ctx, driDrawPriv);

      if (intel->driReadDrawable != driReadPriv) {
         intel->driReadDrawable = driReadPriv;
      }

      if ( intel->driDrawable != driDrawPriv ) {
         if (driDrawPriv->swap_interval == (unsigned)-1) {
            driDrawPriv->vblFlags = (intel->intelScreen->irq_active != 0)
               ? driGetDefaultVBlankFlags(&intel->optionCache)
               : VBLANK_FLAG_NO_IRQ;
            driDrawableInitVBlank( driDrawPriv );
         }

         intel->driDrawable = driDrawPriv;
         intelWindowMoved( intel );
         /* Shouldn't the readbuffer be stored also? */
      }

      _mesa_make_current(&intel->ctx,
                         &intel_fb->Base,
                         readFb);

      intel_draw_buffer(&intel->ctx, &intel_fb->Base);
   } else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return GL_TRUE;
}

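/* Called when the DRM lock was contended: revalidate drawable information
 * (which may drop and retake the lock), and recover from a lost hardware
 * context or lost texture state caused by another client.
 */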
static void intelContendedLock( struct intel_context *intel, GLuint flags )
{
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   __DRIscreenPrivate *sPriv = intel->driScreen;
   volatile struct drm_i915_sarea * sarea = intel->sarea;
   int me = intel->hHWContext;

   drmGetLock(intel->driFd, intel->hHWContext, flags);

   /* If the window moved, may need to set a new cliprect now.
    *
    * NOTE: This releases and regains the hw lock, so all state
    * checking must be done *after* this call:
    */
   if (dPriv)
      DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);


   intel->locked = 1;

   /* Lost context?
    */
   if (sarea->ctxOwner != me) {
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
         fprintf(stderr, "Lost Context: sarea->ctxOwner %x me %x\n",
                 sarea->ctxOwner, me);
      }
      sarea->ctxOwner = me;
   }

   /* If the last consumer of the texture memory wasn't us, notify the fake
    * bufmgr and record the new owner. We should have the memory shared
    * between contexts of a single fake bufmgr, but this will at least make
    * things correct for now.
    */
   if (!intel->ttm && sarea->texAge != intel->hHWContext) {
      sarea->texAge = intel->hHWContext;
      dri_bufmgr_fake_contended_lock_take(intel->bufmgr);
      if (INTEL_DEBUG & DEBUG_BATCH)
         intel_decode_context_reset();
      if (INTEL_DEBUG & DEBUG_BUFMGR) {
         fprintf(stderr, "Lost Textures: sarea->texAge %x hw context %x\n",
                 sarea->texAge, intel->hHWContext);
      }
   }

   /* Drawable changed?
    */
   if (dPriv && intel->lastStamp != dPriv->lastStamp) {
      intelWindowMoved( intel );
      intel->lastStamp = dPriv->lastStamp;
   }
}

_glthread_DECLARE_STATIC_MUTEX(lockMutex);

/* Lock the hardware and validate our state.
 */
void LOCK_HARDWARE( struct intel_context *intel )
{
   char __ret=0;

   _glthread_LOCK_MUTEX(lockMutex);
   assert(!intel->locked);


   DRM_CAS(intel->driHwLock, intel->hHWContext,
           (DRM_LOCK_HELD|intel->hHWContext), __ret);
   if (__ret)
      intelContendedLock( intel, 0 );

   intel->locked = 1;

}


/* Unlock the hardware using the global current context
 */
void UNLOCK_HARDWARE( struct intel_context *intel )
{
   intel->vtbl.note_unlock( intel );
   intel->locked = 0;

   DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);
   _glthread_UNLOCK_MUTEX(lockMutex);

   /**
    * Nothing should be left in batch outside of LOCK/UNLOCK which references
    * cliprects.
    */
   assert(intel->batch->cliprect_mode != REFERENCES_CLIPRECTS);
}