Merge branch 'origin' into glsl-compiler-1
[mesa.git] src/mesa/drivers/dri/radeon/radeon_context.c
/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_context.c,v 1.9 2003/09/24 02:43:12 dawes Exp $ */
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "glheader.h"
#include "api_arrayelt.h"
#include "context.h"
#include "simple_list.h"
#include "imports.h"
#include "matrix.h"
#include "extensions.h"
#include "framebuffer.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "vbo/vbo.h"

#include "tnl/tnl.h"
#include "tnl/t_pipeline.h"

#include "drivers/common/driverfuncs.h"

#include "radeon_context.h"
#include "radeon_ioctl.h"
#include "radeon_state.h"
#include "radeon_span.h"
#include "radeon_tex.h"
#include "radeon_swtcl.h"
#include "radeon_tcl.h"
#include "radeon_maos.h"

#define need_GL_ARB_multisample
#define need_GL_ARB_texture_compression
#define need_GL_EXT_blend_minmax
#define need_GL_EXT_fog_coord
#define need_GL_EXT_secondary_color
#include "extension_helper.h"

#define DRIVER_DATE "20061018"

#include "vblank.h"
#include "utils.h"
#include "xmlpool.h" /* for symbolic values of enum-type options */
#ifndef RADEON_DEBUG
int RADEON_DEBUG = (0);
#endif


/* Return various strings for glGetString().
 */
static const GLubyte *radeonGetString( GLcontext *ctx, GLenum name )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   static char buffer[128];
   unsigned offset;
   GLuint agp_mode = (rmesa->radeonScreen->card_type==RADEON_CARD_PCI) ? 0 :
      rmesa->radeonScreen->AGPMode;

   switch ( name ) {
   case GL_VENDOR:
      return (GLubyte *)"Tungsten Graphics, Inc.";

   case GL_RENDERER:
      offset = driGetRendererString( buffer, "Radeon", DRIVER_DATE,
                                     agp_mode );

      sprintf( & buffer[ offset ], " %sTCL",
               !(rmesa->TclFallback & RADEON_TCL_FALLBACK_TCL_DISABLE)
               ? "" : "NO-" );

      return (GLubyte *)buffer;

   default:
      return NULL;
   }
}


/* Extension strings exported by the R100 driver.
 */
const struct dri_extension card_extensions[] =
{
   { "GL_ARB_multisample", GL_ARB_multisample_functions },
   { "GL_ARB_multitexture", NULL },
   { "GL_ARB_texture_border_clamp", NULL },
   { "GL_ARB_texture_compression", GL_ARB_texture_compression_functions },
   { "GL_ARB_texture_env_add", NULL },
   { "GL_ARB_texture_env_combine", NULL },
   { "GL_ARB_texture_env_crossbar", NULL },
   { "GL_ARB_texture_env_dot3", NULL },
   { "GL_ARB_texture_mirrored_repeat", NULL },
   { "GL_EXT_blend_logic_op", NULL },
   { "GL_EXT_blend_subtract", GL_EXT_blend_minmax_functions },
   { "GL_EXT_fog_coord", GL_EXT_fog_coord_functions },
   { "GL_EXT_secondary_color", GL_EXT_secondary_color_functions },
   { "GL_EXT_stencil_wrap", NULL },
   { "GL_EXT_texture_edge_clamp", NULL },
   { "GL_EXT_texture_env_combine", NULL },
   { "GL_EXT_texture_env_dot3", NULL },
   { "GL_EXT_texture_filter_anisotropic", NULL },
   { "GL_EXT_texture_lod_bias", NULL },
   { "GL_EXT_texture_mirror_clamp", NULL },
   { "GL_ATI_texture_env_combine3", NULL },
   { "GL_ATI_texture_mirror_once", NULL },
   { "GL_MESA_ycbcr_texture", NULL },
   { "GL_NV_blend_square", NULL },
   { "GL_SGIS_generate_mipmap", NULL },
   { NULL, NULL }
};

extern const struct tnl_pipeline_stage _radeon_render_stage;
extern const struct tnl_pipeline_stage _radeon_tcl_stage;

static const struct tnl_pipeline_stage *radeon_pipeline[] = {

   /* Try and go straight to t&l
    */
   &_radeon_tcl_stage,

   /* Catch any t&l fallbacks
    */
   &_tnl_vertex_transform_stage,
   &_tnl_normal_transform_stage,
   &_tnl_lighting_stage,
   &_tnl_fog_coordinate_stage,
   &_tnl_texgen_stage,
   &_tnl_texture_transform_stage,

   &_radeon_render_stage,
   &_tnl_render_stage,  /* FALLBACK: */
   NULL,
};



/* Initialize the driver's misc functions.
 */
static void radeonInitDriverFuncs( struct dd_function_table *functions )
{
   functions->GetString = radeonGetString;
}

static const struct dri_debug_control debug_control[] =
{
   { "fall", DEBUG_FALLBACKS },
   { "tex", DEBUG_TEXTURE },
   { "ioctl", DEBUG_IOCTL },
   { "prim", DEBUG_PRIMS },
   { "vert", DEBUG_VERTS },
   { "state", DEBUG_STATE },
   { "code", DEBUG_CODEGEN },
   { "vfmt", DEBUG_VFMT },
   { "vtxf", DEBUG_VFMT },
   { "verb", DEBUG_VERBOSE },
   { "dri", DEBUG_DRI },
   { "dma", DEBUG_DMA },
   { "san", DEBUG_SANITY },
   { "sync", DEBUG_SYNC },
   { NULL, 0 }
};


/* Create the device specific context.
 */
GLboolean
radeonCreateContext( const __GLcontextModes *glVisual,
                     __DRIcontextPrivate *driContextPriv,
                     void *sharedContextPrivate)
{
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   radeonScreenPtr screen = (radeonScreenPtr)(sPriv->private);
   struct dd_function_table functions;
   radeonContextPtr rmesa;
   GLcontext *ctx, *shareCtx;
   int i;
   int tcl_mode, fthrottle_mode;

   assert(glVisual);
   assert(driContextPriv);
   assert(screen);

   /* Allocate the Radeon context */
   rmesa = (radeonContextPtr) CALLOC( sizeof(*rmesa) );
   if ( !rmesa )
      return GL_FALSE;

   /* init exp fog table data */
   radeonInitStaticFogData();

   /* Parse configuration files.
    * Do this here so that initialMaxAnisotropy is set before we create
    * the default textures.
    */
   driParseConfigFiles (&rmesa->optionCache, &screen->optionCache,
                        screen->driScreen->myNum, "radeon");
   rmesa->initialMaxAnisotropy = driQueryOptionf(&rmesa->optionCache,
                                                 "def_max_anisotropy");

   if ( driQueryOptionb( &rmesa->optionCache, "hyperz" ) ) {
      if ( sPriv->drmMinor < 13 )
         fprintf( stderr, "DRM version 1.%d too old to support HyperZ, "
                  "disabling.\n",sPriv->drmMinor );
      else
         rmesa->using_hyperz = GL_TRUE;
   }

   if ( sPriv->drmMinor >= 15 )
      rmesa->texmicrotile = GL_TRUE;

   /* Init default driver functions then plug in our Radeon-specific functions
    * (the texture functions are especially important)
    */
   _mesa_init_driver_functions( &functions );
   radeonInitDriverFuncs( &functions );
   radeonInitTextureFuncs( &functions );

   /* Allocate the Mesa context */
   if (sharedContextPrivate)
      shareCtx = ((radeonContextPtr) sharedContextPrivate)->glCtx;
   else
      shareCtx = NULL;
   rmesa->glCtx = _mesa_create_context(glVisual, shareCtx,
                                       &functions, (void *) rmesa);
   if (!rmesa->glCtx) {
      FREE(rmesa);
      return GL_FALSE;
   }
   driContextPriv->driverPrivate = rmesa;

   /* Init radeon context data */
   rmesa->dri.context = driContextPriv;
   rmesa->dri.screen = sPriv;
   rmesa->dri.drawable = NULL;
   rmesa->dri.readable = NULL;
   rmesa->dri.hwContext = driContextPriv->hHWContext;
   rmesa->dri.hwLock = &sPriv->pSAREA->lock;
   rmesa->dri.fd = sPriv->fd;
   rmesa->dri.drmMinor = sPriv->drmMinor;

   rmesa->radeonScreen = screen;
   rmesa->sarea = (drm_radeon_sarea_t *)((GLubyte *)sPriv->pSAREA +
                                         screen->sarea_priv_offset);


   rmesa->dma.buf0_address = rmesa->radeonScreen->buffers->list[0].address;

   (void) memset( rmesa->texture_heaps, 0, sizeof( rmesa->texture_heaps ) );
   make_empty_list( & rmesa->swapped );

   rmesa->nr_heaps = screen->numTexHeaps;
   for ( i = 0 ; i < rmesa->nr_heaps ; i++ ) {
      rmesa->texture_heaps[i] = driCreateTextureHeap( i, rmesa,
            screen->texSize[i],
            12,
            RADEON_NR_TEX_REGIONS,
            (drmTextureRegionPtr)rmesa->sarea->tex_list[i],
            & rmesa->sarea->tex_age[i],
            & rmesa->swapped,
            sizeof( radeonTexObj ),
            (destroy_texture_object_t *) radeonDestroyTexObj );

      driSetTextureSwapCounterLocation( rmesa->texture_heaps[i],
                                        & rmesa->c_textureSwaps );
   }
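   /* DRI_CONF_TEXTURE_DEPTH_FB means "match the framebuffer": use 32-bit
    * texture formats on a 32 bpp screen and 16-bit formats otherwise.
    */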
   rmesa->texture_depth = driQueryOptioni (&rmesa->optionCache,
                                           "texture_depth");
   if (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FB)
      rmesa->texture_depth = ( screen->cpp == 4 ) ?
         DRI_CONF_TEXTURE_DEPTH_32 : DRI_CONF_TEXTURE_DEPTH_16;

   rmesa->swtcl.RenderIndex = ~0;
   rmesa->hw.all_dirty = GL_TRUE;

   /* Set the maximum texture size small enough that we can guarantee that
    * all texture units can bind a maximal texture and have all of them in
    * texturable memory at once.  Depending on the allow_large_textures
    * driconf setting, larger textures may be allowed.
    */

   ctx = rmesa->glCtx;
   ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->optionCache,
                                                 "texture_units");
   ctx->Const.MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
   ctx->Const.MaxTextureCoordUnits = ctx->Const.MaxTextureUnits;

   i = driQueryOptioni( &rmesa->optionCache, "allow_large_textures");

   driCalculateMaxTextureLevels( rmesa->texture_heaps,
                                 rmesa->nr_heaps,
                                 & ctx->Const,
                                 4,
                                 11, /* max 2D texture size is 2048x2048 */
                                 8,  /* 256^3 */
                                 9,  /* \todo: max cube texture size seems to be 512x512(x6) */
                                 11, /* max rect texture size is 2048x2048. */
                                 12,
                                 GL_FALSE,
                                 i );


   ctx->Const.MaxTextureMaxAnisotropy = 16.0;

   /* No wide points.
    */
   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 1.0;
   ctx->Const.MaxPointSizeAA = 1.0;

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 10.0;
   ctx->Const.MaxLineWidthAA = 10.0;
   ctx->Const.LineWidthGranularity = 0.0625;

   /* Set maxlocksize (and hence vb size) small enough to avoid
    * fallbacks in radeon_tcl.c, i.e. guarantee that all vertices can
    * fit in a single dma buffer for indexed rendering of quad strips,
    * etc.
    */
   ctx->Const.MaxArrayLockSize =
      MIN2( ctx->Const.MaxArrayLockSize,
            RADEON_BUFFER_SIZE / RADEON_MAX_TCL_VERTSIZE );

   rmesa->boxes = 0;

   /* Initialize the software rasterizer and helper modules.
    */
   _swrast_CreateContext( ctx );
   _vbo_CreateContext( ctx );
   _tnl_CreateContext( ctx );
   _swsetup_CreateContext( ctx );
   _ae_create_context( ctx );

   /* Install the customized pipeline:
    */
   _tnl_destroy_pipeline( ctx );
   _tnl_install_pipeline( ctx, radeon_pipeline );

   /* Try and keep materials and vertices separate:
    */
   /* _tnl_isolate_materials( ctx, GL_TRUE ); */

   /* Configure swrast and T&L to match hardware characteristics:
    */
   _swrast_allow_pixel_fog( ctx, GL_FALSE );
   _swrast_allow_vertex_fog( ctx, GL_TRUE );
   _tnl_allow_pixel_fog( ctx, GL_FALSE );
   _tnl_allow_vertex_fog( ctx, GL_TRUE );


   for ( i = 0 ; i < RADEON_MAX_TEXTURE_UNITS ; i++ ) {
      _math_matrix_ctr( &rmesa->TexGenMatrix[i] );
      _math_matrix_ctr( &rmesa->tmpmat[i] );
      _math_matrix_set_identity( &rmesa->TexGenMatrix[i] );
      _math_matrix_set_identity( &rmesa->tmpmat[i] );
   }

   driInitExtensions( ctx, card_extensions, GL_TRUE );
   if (rmesa->radeonScreen->drmSupportsCubeMapsR100)
      _mesa_enable_extension( ctx, "GL_ARB_texture_cube_map" );
   if (rmesa->glCtx->Mesa_DXTn) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
      _mesa_enable_extension( ctx, "GL_S3_s3tc" );
   }
   else if (driQueryOptionb (&rmesa->optionCache, "force_s3tc_enable")) {
      _mesa_enable_extension( ctx, "GL_EXT_texture_compression_s3tc" );
   }

   if (rmesa->dri.drmMinor >= 9)
      _mesa_enable_extension( ctx, "GL_NV_texture_rectangle");

   /* XXX these should really go right after _mesa_init_driver_functions() */
   radeonInitIoctlFuncs( ctx );
   radeonInitStateFuncs( ctx );
   radeonInitSpanFuncs( ctx );
   radeonInitState( rmesa );
   radeonInitSwtcl( ctx );

   _mesa_vector4f_alloc( &rmesa->tcl.ObjClean, 0,
                         ctx->Const.MaxArrayLockSize, 32 );

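   /* Frame throttling: wait on the DRM IRQ only when the screen actually has
    * one and the option asks for IRQ throttling; otherwise fall back to
    * usleep()-based throttling.
    */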
   fthrottle_mode = driQueryOptioni(&rmesa->optionCache, "fthrottle_mode");
   rmesa->iw.irq_seq = -1;
   rmesa->irqsEmitted = 0;
   rmesa->do_irqs = (rmesa->radeonScreen->irq != 0 &&
                     fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);

   rmesa->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);

   rmesa->vblank_flags = (rmesa->radeonScreen->irq != 0)
      ? driGetDefaultVBlankFlags(&rmesa->optionCache) : VBLANK_FLAG_NO_IRQ;

   (*dri_interface->getUST)( & rmesa->swap_ust );


#if DO_DEBUG
   RADEON_DEBUG = driParseDebugString( getenv( "RADEON_DEBUG" ),
                                       debug_control );
#endif

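   /* Fall back to software rasterization or software TCL when requested via
    * driconf, or to software TCL when the chip has no TCL block at all.
    */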
   tcl_mode = driQueryOptioni(&rmesa->optionCache, "tcl_mode");
   if (driQueryOptionb(&rmesa->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D acceleration\n");
      FALLBACK(rmesa, RADEON_FALLBACK_DISABLE, 1);
   } else if (tcl_mode == DRI_CONF_TCL_SW ||
              !(rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL)) {
      if (rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
         rmesa->radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
         fprintf(stderr, "Disabling HW TCL support\n");
      }
      TCL_FALLBACK(rmesa->glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
   }

   if (rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
      /* _tnl_need_dlist_norm_lengths( ctx, GL_FALSE ); */
   }
   return GL_TRUE;
}


/* Destroy the device specific context.
 */
/* Destroy the Mesa and driver specific context data.
 */
void radeonDestroyContext( __DRIcontextPrivate *driContextPriv )
{
   GET_CURRENT_CONTEXT(ctx);
   radeonContextPtr rmesa = (radeonContextPtr) driContextPriv->driverPrivate;
   radeonContextPtr current = ctx ? RADEON_CONTEXT(ctx) : NULL;

   /* check if we're deleting the currently bound context */
   if (rmesa == current) {
      RADEON_FIREVERTICES( rmesa );
      _mesa_make_current(NULL, NULL, NULL);
   }

   /* Free radeon context resources */
   assert(rmesa); /* should never be null */
   if ( rmesa ) {
      GLboolean release_texture_heaps;


      release_texture_heaps = (rmesa->glCtx->Shared->RefCount == 1);
      _swsetup_DestroyContext( rmesa->glCtx );
      _tnl_DestroyContext( rmesa->glCtx );
      _vbo_DestroyContext( rmesa->glCtx );
      _swrast_DestroyContext( rmesa->glCtx );

      radeonDestroySwtcl( rmesa->glCtx );
      radeonReleaseArrays( rmesa->glCtx, ~0 );
      if (rmesa->dma.current.buf) {
         radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );
         radeonFlushCmdBuf( rmesa, __FUNCTION__ );
      }

      _mesa_vector4f_free( &rmesa->tcl.ObjClean );

      if (rmesa->state.scissor.pClipRects) {
         FREE(rmesa->state.scissor.pClipRects);
         rmesa->state.scissor.pClipRects = NULL;
      }

      if ( release_texture_heaps ) {
         /* This share group is about to go away, free our private
          * texture object data.
          */
         int i;

         for ( i = 0 ; i < rmesa->nr_heaps ; i++ ) {
            driDestroyTextureHeap( rmesa->texture_heaps[ i ] );
            rmesa->texture_heaps[ i ] = NULL;
         }

         assert( is_empty_list( & rmesa->swapped ) );
      }

      /* free the Mesa context */
      rmesa->glCtx->DriverCtx = NULL;
      _mesa_destroy_context( rmesa->glCtx );

      /* free the option cache */
      driDestroyOptionCache (&rmesa->optionCache);

      FREE( rmesa );
   }
}




void
radeonSwapBuffers( __DRIdrawablePrivate *dPriv )
{

   if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
      radeonContextPtr rmesa;
      GLcontext *ctx;
      rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
      ctx = rmesa->glCtx;
      if (ctx->Visual.doubleBufferMode) {
         _mesa_notifySwapBuffers( ctx );  /* flush pending rendering commands */

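         /* Page flip when the screen is set up for it; otherwise blit the
          * back buffer to the front.
          */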
         if ( rmesa->doPageFlip ) {
            radeonPageFlip( dPriv );
         }
         else {
            radeonCopyBuffer( dPriv, NULL );
         }
      }
   }
   else {
      /* XXX this shouldn't be an error but we can't handle it for now */
      _mesa_problem(NULL, "%s: drawable has no context!", __FUNCTION__);
   }
}

void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
                         int x, int y, int w, int h )
{
   if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
      radeonContextPtr radeon;
      GLcontext *ctx;

      radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
      ctx = radeon->glCtx;

      if (ctx->Visual.doubleBufferMode) {
         drm_clip_rect_t rect;
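         /* Convert from GL window coordinates (origin at the bottom left of
          * the drawable) to screen coordinates for the blit.
          */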
         rect.x1 = x + dPriv->x;
         rect.y1 = (dPriv->h - y - h) + dPriv->y;
         rect.x2 = rect.x1 + w;
         rect.y2 = rect.y1 + h;
         _mesa_notifySwapBuffers(ctx);   /* flush pending rendering commands */
         radeonCopyBuffer(dPriv, &rect);
      }
   } else {
      /* XXX this shouldn't be an error but we can't handle it for now */
      _mesa_problem(NULL, "%s: drawable has no context!",
                    __FUNCTION__);
   }
}

/* Make context `c' the current context and bind it to the given
 * drawing and reading surfaces.
 */
GLboolean
radeonMakeCurrent( __DRIcontextPrivate *driContextPriv,
                   __DRIdrawablePrivate *driDrawPriv,
                   __DRIdrawablePrivate *driReadPriv )
{
   if ( driContextPriv ) {
      radeonContextPtr newCtx =
         (radeonContextPtr) driContextPriv->driverPrivate;

      if (RADEON_DEBUG & DEBUG_DRI)
         fprintf(stderr, "%s ctx %p\n", __FUNCTION__, (void *) newCtx->glCtx);

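      /* When this context is bound to a different drawable or readable,
       * (re)initialize vblank state and refresh the derived window and
       * viewport offsets.
       */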
      if ( newCtx->dri.drawable != driDrawPriv ) {
         /* XXX we may need to validate the drawable here!!! */
         driDrawableInitVBlank( driDrawPriv, newCtx->vblank_flags,
                                &newCtx->vbl_seq );
      }

      if ( (newCtx->dri.drawable != driDrawPriv)
           || (newCtx->dri.readable != driReadPriv) ) {
         newCtx->dri.drawable = driDrawPriv;
         newCtx->dri.readable = driReadPriv;

         radeonUpdateWindow( newCtx->glCtx );
         radeonUpdateViewportOffset( newCtx->glCtx );
      }

      _mesa_make_current( newCtx->glCtx,
                          (GLframebuffer *) driDrawPriv->driverPrivate,
                          (GLframebuffer *) driReadPriv->driverPrivate );

   } else {
      if (RADEON_DEBUG & DEBUG_DRI)
         fprintf(stderr, "%s ctx is null\n", __FUNCTION__);
      _mesa_make_current( NULL, NULL, NULL );
   }

   if (RADEON_DEBUG & DEBUG_DRI)
      fprintf(stderr, "End %s\n", __FUNCTION__);
   return GL_TRUE;
}

/* Force the context `c' to be unbound from its buffer.
 */
GLboolean
radeonUnbindContext( __DRIcontextPrivate *driContextPriv )
{
   radeonContextPtr rmesa = (radeonContextPtr) driContextPriv->driverPrivate;

   if (RADEON_DEBUG & DEBUG_DRI)
      fprintf(stderr, "%s ctx %p\n", __FUNCTION__, (void *) rmesa->glCtx);

   return GL_TRUE;
}