2 Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 The Weather Channel (TM) funded Tungsten Graphics to develop the
5 initial release of the Radeon 8500 driver under the XFree86 license.
6 This notice must be preserved.
8 Permission is hereby granted, free of charge, to any person obtaining
9 a copy of this software and associated documentation files (the
10 "Software"), to deal in the Software without restriction, including
11 without limitation the rights to use, copy, modify, merge, publish,
12 distribute, sublicense, and/or sell copies of the Software, and to
13 permit persons to whom the Software is furnished to do so, subject to
14 the following conditions:
16 The above copyright notice and this permission notice (including the
17 next paragraph) shall be included in all copies or substantial
18 portions of the Software.
20 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
24 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
25 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
26 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 **************************************************************************/
32 * Keith Whitwell <keith@tungstengraphics.com>
33 * Nicolai Haehnle <prefect_@gmx.net>
36 #include "api_arrayelt.h"
38 #include "simple_list.h"
41 #include "extensions.h"
43 #include "bufferobj.h"
45 #include "swrast/swrast.h"
46 #include "swrast_setup/swrast_setup.h"
50 #include "tnl/t_pipeline.h"
51 #include "tnl/t_vp_build.h"
53 #include "drivers/common/driverfuncs.h"
55 #include "radeon_ioctl.h"
56 #include "radeon_span.h"
57 #include "r300_context.h"
58 #include "r300_cmdbuf.h"
59 #include "r300_state.h"
60 #include "r300_ioctl.h"
62 #include "r300_maos.h"
65 #include "radeon_mm.h"
70 #include "xmlpool.h" /* for symbolic values of enum-type options */
/* hw_tcl_on derives from future_hw_tcl_on when it's safe to change it. */
int future_hw_tcl_on = 1;
76 #define need_GL_ARB_multisample
77 #define need_GL_ARB_texture_compression
78 #define need_GL_ARB_vertex_buffer_object
79 #define need_GL_ARB_vertex_program
80 #define need_GL_EXT_blend_minmax
81 //#define need_GL_EXT_fog_coord
82 #define need_GL_EXT_secondary_color
83 #define need_GL_EXT_blend_equation_separate
84 #define need_GL_EXT_blend_func_separate
85 #define need_GL_EXT_gpu_program_parameters
86 #define need_GL_NV_vertex_program
87 #include "extension_helper.h"
89 const struct dri_extension card_extensions
[] = {
90 {"GL_ARB_multisample", GL_ARB_multisample_functions
},
91 {"GL_ARB_multitexture", NULL
},
92 {"GL_ARB_texture_border_clamp", NULL
},
93 {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions
},
94 {"GL_ARB_texture_cube_map", NULL
},
95 {"GL_ARB_texture_env_add", NULL
},
96 {"GL_ARB_texture_env_combine", NULL
},
97 {"GL_ARB_texture_env_crossbar", NULL
},
98 {"GL_ARB_texture_env_dot3", NULL
},
99 {"GL_ARB_texture_mirrored_repeat", NULL
},
100 {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions
},
101 {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions
},
102 {"GL_ARB_fragment_program", NULL
},
103 {"GL_EXT_blend_equation_separate", GL_EXT_blend_equation_separate_functions
},
104 {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions
},
105 {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions
},
106 {"GL_EXT_blend_subtract", NULL
},
107 // {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions },
108 {"GL_EXT_gpu_program_parameters", GL_EXT_gpu_program_parameters_functions
},
109 {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions
},
110 {"GL_EXT_stencil_wrap", NULL
},
111 {"GL_EXT_texture_edge_clamp", NULL
},
112 {"GL_EXT_texture_env_combine", NULL
},
113 {"GL_EXT_texture_env_dot3", NULL
},
114 {"GL_EXT_texture_filter_anisotropic", NULL
},
115 {"GL_EXT_texture_lod_bias", NULL
},
116 {"GL_EXT_texture_mirror_clamp", NULL
},
117 {"GL_EXT_texture_rectangle", NULL
},
118 {"GL_ATI_texture_env_combine3", NULL
},
119 {"GL_ATI_texture_mirror_once", NULL
},
120 {"GL_MESA_pack_invert", NULL
},
121 {"GL_MESA_ycbcr_texture", NULL
},
122 {"GL_MESAX_texture_float", NULL
},
123 {"GL_NV_blend_square", NULL
},
124 {"GL_NV_vertex_program", GL_NV_vertex_program_functions
},
125 {"GL_SGIS_generate_mipmap", NULL
},
129 extern struct tnl_pipeline_stage _r300_render_stage
;
130 extern const struct tnl_pipeline_stage _r300_tcl_stage
;
131 extern const struct tnl_pipeline_stage _r300_texrect_stage
;
133 static const struct tnl_pipeline_stage
*r300_pipeline
[] = {
135 /* Try and go straight to t&l
139 /* Catch any t&l fallbacks
141 &_tnl_vertex_transform_stage
,
142 &_tnl_normal_transform_stage
,
143 &_tnl_lighting_stage
,
144 &_tnl_fog_coordinate_stage
,
146 &_tnl_texture_transform_stage
,
147 &_tnl_arb_vertex_program_stage
,
148 &_tnl_vertex_program_stage
,
150 /* Try again to go to tcl?
151 * - no good for asymmetric-twoside (do with multipass)
152 * - no good for asymmetric-unfilled (do with multipass)
153 * - good for material
155 * - need to manipulate a bit of state
157 * - worth it/not worth it?
160 /* Else do them here.
162 /* scale texture rectangle to 0..1. */
163 &_r300_texrect_stage
,
165 &_tnl_render_stage
, /* FALLBACK */
170 /* Create the device specific rendering context.
172 GLboolean
r300CreateContext(const __GLcontextModes
* glVisual
,
173 __DRIcontextPrivate
* driContextPriv
,
174 void *sharedContextPrivate
)
176 __DRIscreenPrivate
*sPriv
= driContextPriv
->driScreenPriv
;
177 radeonScreenPtr screen
= (radeonScreenPtr
) (sPriv
->private);
178 struct dd_function_table functions
;
184 assert(driContextPriv
);
187 /* Allocate the R300 context */
188 r300
= (r300ContextPtr
)CALLOC(sizeof(*r300
));
192 /* Parse configuration files.
193 * Do this here so that initialMaxAnisotropy is set before we create
194 * the default textures.
196 driParseConfigFiles(&r300
->radeon
.optionCache
, &screen
->optionCache
,
197 screen
->driScreen
->myNum
, "r300");
199 //r300->texmicrotile = GL_TRUE;
201 /* Init default driver functions then plug in our R300-specific functions
202 * (the texture functions are especially important)
204 _mesa_init_driver_functions(&functions
);
205 r300InitIoctlFuncs(&functions
);
206 r300InitStateFuncs(&functions
);
207 r300InitTextureFuncs(&functions
);
208 r300InitShaderFuncs(&functions
);
211 radeon_mm_init(r300
);
215 r300_init_vbo_funcs(&functions
);
218 if (!radeonInitContext(&r300
->radeon
, &functions
,
219 glVisual
, driContextPriv
, sharedContextPrivate
)) {
224 /* Init r300 context data */
225 r300
->dma
.buf0_address
= r300
->radeon
.radeonScreen
->buffers
->list
[0].address
;
227 (void)memset(r300
->texture_heaps
, 0, sizeof(r300
->texture_heaps
));
228 make_empty_list(&r300
->swapped
);
230 r300
->nr_heaps
= 1 /* screen->numTexHeaps */ ;
231 assert(r300
->nr_heaps
< RADEON_NR_TEX_HEAPS
);
232 for (i
= 0; i
< r300
->nr_heaps
; i
++) {
233 r300
->texture_heaps
[i
] = driCreateTextureHeap(i
, r300
,
236 RADEON_NR_TEX_REGIONS
,
237 (drmTextureRegionPtr
)
240 &r300
->radeon
.sarea
->
245 (destroy_texture_object_t
249 r300
->texture_depth
= driQueryOptioni(&r300
->radeon
.optionCache
,
251 if (r300
->texture_depth
== DRI_CONF_TEXTURE_DEPTH_FB
)
252 r300
->texture_depth
= (screen
->cpp
== 4) ?
253 DRI_CONF_TEXTURE_DEPTH_32
: DRI_CONF_TEXTURE_DEPTH_16
;
255 /* Set the maximum texture size small enough that we can guarentee that
256 * all texture units can bind a maximal texture and have them both in
257 * texturable memory at once.
260 ctx
= r300
->radeon
.glCtx
;
262 ctx
->Const
.MaxTextureImageUnits
= driQueryOptioni(&r300
->radeon
.optionCache
,
263 "texture_image_units");
264 ctx
->Const
.MaxTextureCoordUnits
= driQueryOptioni(&r300
->radeon
.optionCache
,
265 "texture_coord_units");
266 ctx
->Const
.MaxTextureUnits
= MIN2(ctx
->Const
.MaxTextureImageUnits
,
267 ctx
->Const
.MaxTextureCoordUnits
);
268 ctx
->Const
.MaxTextureMaxAnisotropy
= 16.0;
270 ctx
->Const
.MinPointSize
= 1.0;
271 ctx
->Const
.MinPointSizeAA
= 1.0;
272 ctx
->Const
.MaxPointSize
= R300_POINTSIZE_MAX
;
273 ctx
->Const
.MaxPointSizeAA
= R300_POINTSIZE_MAX
;
275 ctx
->Const
.MinLineWidth
= 1.0;
276 ctx
->Const
.MinLineWidthAA
= 1.0;
277 ctx
->Const
.MaxLineWidth
= R300_LINESIZE_MAX
;
278 ctx
->Const
.MaxLineWidthAA
= R300_LINESIZE_MAX
;
281 /* Needs further modifications */
283 ctx
->Const
.MaxArrayLockSize
= (/*512*/RADEON_BUFFER_SIZE
*16*1024) / (4*4);
287 /* Initialize the software rasterizer and helper modules.
289 _swrast_CreateContext(ctx
);
290 _vbo_CreateContext(ctx
);
291 _tnl_CreateContext(ctx
);
292 _swsetup_CreateContext(ctx
);
293 _swsetup_Wakeup(ctx
);
294 _ae_create_context(ctx
);
296 /* Install the customized pipeline:
298 _tnl_destroy_pipeline(ctx
);
299 _tnl_install_pipeline(ctx
, r300_pipeline
);
301 /* Try and keep materials and vertices separate:
303 /* _tnl_isolate_materials(ctx, GL_TRUE); */
305 /* Configure swrast and TNL to match hardware characteristics:
307 _swrast_allow_pixel_fog(ctx
, GL_FALSE
);
308 _swrast_allow_vertex_fog(ctx
, GL_TRUE
);
309 _tnl_allow_pixel_fog(ctx
, GL_FALSE
);
310 _tnl_allow_vertex_fog(ctx
, GL_TRUE
);
312 /* currently bogus data */
313 ctx
->Const
.VertexProgram
.MaxInstructions
=VSF_MAX_FRAGMENT_LENGTH
/4;
314 ctx
->Const
.VertexProgram
.MaxNativeInstructions
=VSF_MAX_FRAGMENT_LENGTH
/4;
315 ctx
->Const
.VertexProgram
.MaxNativeAttribs
=16; /* r420 */
316 ctx
->Const
.VertexProgram
.MaxTemps
=32;
317 ctx
->Const
.VertexProgram
.MaxNativeTemps
=/*VSF_MAX_FRAGMENT_TEMPS*/32;
318 ctx
->Const
.VertexProgram
.MaxNativeParameters
=256; /* r420 */
319 ctx
->Const
.VertexProgram
.MaxNativeAddressRegs
=1;
321 ctx
->Const
.FragmentProgram
.MaxNativeTemps
= PFS_NUM_TEMP_REGS
;
322 ctx
->Const
.FragmentProgram
.MaxNativeAttribs
= 11; /* copy i915... */
323 ctx
->Const
.FragmentProgram
.MaxNativeParameters
= PFS_NUM_CONST_REGS
;
324 ctx
->Const
.FragmentProgram
.MaxNativeAluInstructions
= PFS_MAX_ALU_INST
;
325 ctx
->Const
.FragmentProgram
.MaxNativeTexInstructions
= PFS_MAX_TEX_INST
;
326 ctx
->Const
.FragmentProgram
.MaxNativeInstructions
= PFS_MAX_ALU_INST
+PFS_MAX_TEX_INST
;
327 ctx
->Const
.FragmentProgram
.MaxNativeTexIndirections
= PFS_MAX_TEX_INDIRECT
;
328 ctx
->Const
.FragmentProgram
.MaxNativeAddressRegs
= 0; /* and these are?? */
329 _tnl_ProgramCacheInit(ctx
);
330 ctx
->_MaintainTexEnvProgram
= GL_TRUE
;
332 driInitExtensions(ctx
, card_extensions
, GL_TRUE
);
334 if (r300
->radeon
.glCtx
->Mesa_DXTn
&& !driQueryOptionb (&r300
->radeon
.optionCache
, "disable_s3tc")) {
335 _mesa_enable_extension( ctx
, "GL_EXT_texture_compression_s3tc" );
336 _mesa_enable_extension( ctx
, "GL_S3_s3tc" );
338 else if (driQueryOptionb (&r300
->radeon
.optionCache
, "force_s3tc_enable")) {
339 _mesa_enable_extension( ctx
, "GL_EXT_texture_compression_s3tc" );
342 r300
->disable_lowimpact_fallback
= driQueryOptionb(&r300
->radeon
.optionCache
, "disable_lowimpact_fallback");
344 radeonInitSpanFuncs(ctx
);
345 r300InitCmdBuf(r300
);
348 #ifdef RADEON_VTXFMT_A
349 radeon_init_vtxfmt_a(r300
);
353 /* plug in a few more device driver functions */
354 /* XXX these should really go right after _mesa_init_driver_functions() */
355 r300InitPixelFuncs(ctx
);
358 TNL_CONTEXT(ctx
)->Driver
.RunPipeline
= _tnl_run_pipeline
;
360 tcl_mode
= driQueryOptioni(&r300
->radeon
.optionCache
, "tcl_mode");
361 if (driQueryOptionb(&r300
->radeon
.optionCache
, "no_rast")) {
362 fprintf(stderr
, "disabling 3D acceleration\n");
364 FALLBACK(&r300
->radeon
, RADEON_FALLBACK_DISABLE
, 1);
367 if (tcl_mode
== DRI_CONF_TCL_SW
||
368 !(r300
->radeon
.radeonScreen
->chip_flags
& RADEON_CHIPSET_TCL
)) {
369 if (r300
->radeon
.radeonScreen
->chip_flags
& RADEON_CHIPSET_TCL
) {
370 r300
->radeon
.radeonScreen
->chip_flags
&= ~RADEON_CHIPSET_TCL
;
371 fprintf(stderr
, "Disabling HW TCL support\n");
373 TCL_FALLBACK(r300
->radeon
.glCtx
, RADEON_TCL_FALLBACK_TCL_DISABLE
, 1);
#ifdef USER_BUFFERS		/* NOTE(review): opening guard reconstructed to match
				 * the "#endif USER_BUFFERS" below — confirm placement. */
/* Wait for and release every pending GART memory allocation tracked by
 * the user-buffer memory manager.
 *
 * First pass counts buffers still pending on the hardware; if any exist
 * the command buffer is flushed so their fences can retire.  Second pass
 * idles each pending buffer against the hardware age, then frees its
 * GART region via DRM_RADEON_FREE (and any framebuffer-side allocation).
 */
static void r300FreeGartAllocations(r300ContextPtr r300)
{
	int i, ret, tries = 0, done_age, in_use = 0;
	drm_radeon_mem_free_t memfree;

	memfree.region = RADEON_MEM_REGION_GART;

	/* First pass: is any buffer still in flight? */
	for (i = r300->rmm->u_last; i > 0; i--) {
		if (r300->rmm->u_list[i].ptr == NULL) {
			continue;
		}

		/* check whether this buffer is still in use */
		if (r300->rmm->u_list[i].pending) {
			in_use++;
		}
	}

	/* Cannot flush/lock if no context exists. */
	if (in_use)
		r300FlushCmdBuf(r300, __FUNCTION__);

	done_age = radeonGetAge((radeonContextPtr) r300);

	/* Second pass: idle and free each pending buffer. */
	for (i = r300->rmm->u_last; i > 0; i--) {
		if (r300->rmm->u_list[i].ptr == NULL) {
			continue;
		}

		/* check whether this buffer is still in use */
		if (!r300->rmm->u_list[i].pending) {
			continue;
		}

		assert(r300->rmm->u_list[i].h_pending == 0);

		/* Bounded busy-wait for the buffer's age to retire.
		 * NOTE(review): the dump lost the loop body's first line —
		 * upstream may have slept here; confirm.
		 */
		tries = 0;
		while (r300->rmm->u_list[i].age > done_age && tries++ < 1000) {
			done_age = radeonGetAge((radeonContextPtr) r300);
		}
		if (tries >= 1000) {
			WARN_ONCE("Failed to idle region!");
		}

		/* Region offset is relative to the GART texture aperture. */
		memfree.region_offset = (char *)r300->rmm->u_list[i].ptr -
		    (char *)r300->radeon.radeonScreen->gartTextures.map;

		ret = drmCommandWrite(r300->radeon.radeonScreen->driScreen->fd,
				      DRM_RADEON_FREE, &memfree,
				      sizeof(memfree));
		if (ret) {
			fprintf(stderr, "Failed to free at %p\nret = %s\n",
				r300->rmm->u_list[i].ptr, strerror(-ret));
		} else {
			if (i == r300->rmm->u_last)
				r300->rmm->u_last--;

			r300->rmm->u_list[i].pending = 0;
			r300->rmm->u_list[i].ptr = NULL;
			if (r300->rmm->u_list[i].fb) {
				LOCK_HARDWARE(&(r300->radeon));
				ret = mmFreeMem(r300->rmm->u_list[i].fb);
				UNLOCK_HARDWARE(&(r300->radeon));
				if (ret)
					fprintf(stderr, "failed to free!\n");
				r300->rmm->u_list[i].fb = NULL;
			}
			r300->rmm->u_list[i].ref_count = 0;
		}
	}
	r300->rmm->u_head = i;
}
#endif				/* USER_BUFFERS */
452 /* Destroy the device specific context.
454 void r300DestroyContext(__DRIcontextPrivate
* driContextPriv
)
456 GET_CURRENT_CONTEXT(ctx
);
457 r300ContextPtr r300
= (r300ContextPtr
) driContextPriv
->driverPrivate
;
458 radeonContextPtr radeon
= (radeonContextPtr
) r300
;
459 radeonContextPtr current
= ctx
? RADEON_CONTEXT(ctx
) : NULL
;
461 if (RADEON_DEBUG
& DEBUG_DRI
) {
462 fprintf(stderr
, "Destroying context !\n");
465 /* check if we're deleting the currently bound context */
466 if (&r300
->radeon
== current
) {
467 radeonFlush(r300
->radeon
.glCtx
);
468 _mesa_make_current(NULL
, NULL
, NULL
);
471 /* Free r300 context resources */
472 assert(r300
); /* should never be null */
475 GLboolean release_texture_heaps
;
477 release_texture_heaps
= (r300
->radeon
.glCtx
->Shared
->RefCount
== 1);
478 _swsetup_DestroyContext(r300
->radeon
.glCtx
);
479 _tnl_ProgramCacheDestroy(r300
->radeon
.glCtx
);
480 _tnl_DestroyContext(r300
->radeon
.glCtx
);
481 _vbo_DestroyContext(r300
->radeon
.glCtx
);
482 _swrast_DestroyContext(r300
->radeon
.glCtx
);
484 if (r300
->dma
.current
.buf
) {
485 r300ReleaseDmaRegion(r300
, &r300
->dma
.current
, __FUNCTION__
);
487 r300FlushCmdBuf(r300
, __FUNCTION__
);
490 r300FreeGartAllocations(r300
);
491 r300DestroyCmdBuf(r300
);
493 if (radeon
->state
.scissor
.pClipRects
) {
494 FREE(radeon
->state
.scissor
.pClipRects
);
495 radeon
->state
.scissor
.pClipRects
= NULL
;
498 if (release_texture_heaps
) {
499 /* This share group is about to go away, free our private
500 * texture object data.
504 for (i
= 0; i
< r300
->nr_heaps
; i
++) {
505 driDestroyTextureHeap(r300
->texture_heaps
[i
]);
506 r300
->texture_heaps
[i
] = NULL
;
509 assert(is_empty_list(&r300
->swapped
));
512 radeonCleanupContext(&r300
->radeon
);
515 /* the memory manager might be accessed when Mesa frees the shared
516 * state, so don't destroy it earlier
518 radeon_mm_destroy(r300
);
521 /* free the option cache */
522 driDestroyOptionCache(&r300
->radeon
.optionCache
);