src/mesa/drivers/dri/radeon/radeon_texstate.c
/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_texstate.c,v 1.6 2002/12/16 16:18:59 dawes Exp $ */
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *    Kevin E. Martin <martin@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "glheader.h"
#include "imports.h"
#include "colormac.h"
#include "context.h"
#include "macros.h"
#include "texformat.h"
#include "enums.h"

#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_swtcl.h"
#include "radeon_tex.h"
#include "radeon_tcl.h"


#define RADEON_TXFORMAT_A8        RADEON_TXFORMAT_I8
#define RADEON_TXFORMAT_L8        RADEON_TXFORMAT_I8
#define RADEON_TXFORMAT_AL88      RADEON_TXFORMAT_AI88
#define RADEON_TXFORMAT_YCBCR     RADEON_TXFORMAT_YVYU422
#define RADEON_TXFORMAT_YCBCR_REV RADEON_TXFORMAT_VYUY422
#define RADEON_TXFORMAT_RGB_DXT1  RADEON_TXFORMAT_DXT1
#define RADEON_TXFORMAT_RGBA_DXT1 RADEON_TXFORMAT_DXT1
#define RADEON_TXFORMAT_RGBA_DXT3 RADEON_TXFORMAT_DXT23
#define RADEON_TXFORMAT_RGBA_DXT5 RADEON_TXFORMAT_DXT45

#define _COLOR(f) \
   [ MESA_FORMAT_ ## f ] = { RADEON_TXFORMAT_ ## f, 0 }
#define _COLOR_REV(f) \
   [ MESA_FORMAT_ ## f ## _REV ] = { RADEON_TXFORMAT_ ## f, 0 }
#define _ALPHA(f) \
   [ MESA_FORMAT_ ## f ] = { RADEON_TXFORMAT_ ## f | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 }
#define _ALPHA_REV(f) \
   [ MESA_FORMAT_ ## f ## _REV ] = { RADEON_TXFORMAT_ ## f | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 }
#define _YUV(f) \
   [ MESA_FORMAT_ ## f ] = { RADEON_TXFORMAT_ ## f, RADEON_YUV_TO_RGB }
#define _INVALID(f) \
   [ MESA_FORMAT_ ## f ] = { 0xffffffff, 0 }
#define VALID_FORMAT(f) ( ((f) <= MESA_FORMAT_RGBA_DXT5) \
                          && (tx_table[f].format != 0xffffffff) )
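
/* A short sketch of what the table entries below expand to (the tokens are
 * pasted exactly as in the macros above); for example _ALPHA(ARGB4444)
 * becomes roughly:
 *
 *    [ MESA_FORMAT_ARGB4444 ] =
 *       { RADEON_TXFORMAT_ARGB4444 | RADEON_TXFORMAT_ALPHA_IN_MAP, 0 },
 *
 * i.e. a designated initializer, so tx_table[] can be indexed directly by
 * the Mesa texture format enum.
 */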
static const struct {
   GLuint format, filter;
}
tx_table[] =
{
   _ALPHA(RGBA8888),
   _ALPHA_REV(RGBA8888),
   _ALPHA(ARGB8888),
   _ALPHA_REV(ARGB8888),
   _INVALID(RGB888),
   _COLOR(RGB565),
   _COLOR_REV(RGB565),
   _ALPHA(ARGB4444),
   _ALPHA_REV(ARGB4444),
   _ALPHA(ARGB1555),
   _ALPHA_REV(ARGB1555),
   _ALPHA(AL88),
   _ALPHA_REV(AL88),
   _ALPHA(A8),
   _COLOR(L8),
   _ALPHA(I8),
   _INVALID(CI8),
   _YUV(YCBCR),
   _YUV(YCBCR_REV),
   _INVALID(RGB_FXT1),
   _INVALID(RGBA_FXT1),
   _COLOR(RGB_DXT1),
   _ALPHA(RGBA_DXT1),
   _ALPHA(RGBA_DXT3),
   _ALPHA(RGBA_DXT5),
};

#undef _COLOR
#undef _COLOR_REV
#undef _ALPHA
#undef _ALPHA_REV
#undef _YUV
#undef _INVALID

/**
 * This function computes the number of bytes of storage needed for
 * the given texture object (all mipmap levels, all cube faces).
 * The \c image[face][level].x/y/width/height parameters for upload/blitting
 * are computed here.  \c pp_txfilter, \c pp_txformat, etc. will be set here
 * too.
 *
 * \param rmesa Context pointer
 * \param tObj GL texture object whose images are to be posted to
 *             hardware state.
 */
static void radeonSetTexImages( radeonContextPtr rmesa,
                                struct gl_texture_object *tObj )
{
   radeonTexObjPtr t = (radeonTexObjPtr)tObj->DriverData;
   const struct gl_texture_image *baseImage = tObj->Image[0][tObj->BaseLevel];
   GLint curOffset, blitWidth;
   GLint i, texelBytes;
   GLint numLevels;
   GLint log2Width, log2Height, log2Depth;

   /* Set the hardware texture format
    */

   t->pp_txformat &= ~(RADEON_TXFORMAT_FORMAT_MASK |
                       RADEON_TXFORMAT_ALPHA_IN_MAP);
   t->pp_txfilter &= ~RADEON_YUV_TO_RGB;

   if ( VALID_FORMAT( baseImage->TexFormat->MesaFormat ) ) {
      t->pp_txformat |= tx_table[ baseImage->TexFormat->MesaFormat ].format;
      t->pp_txfilter |= tx_table[ baseImage->TexFormat->MesaFormat ].filter;
   }
   else {
      _mesa_problem(NULL, "unexpected texture format in %s", __FUNCTION__);
      return;
   }

   texelBytes = baseImage->TexFormat->TexelBytes;

   /* Compute which mipmap levels we really want to send to the hardware.
    */

   driCalculateTextureFirstLastLevel( (driTextureObject *) t );
   log2Width  = tObj->Image[0][t->base.firstLevel]->WidthLog2;
   log2Height = tObj->Image[0][t->base.firstLevel]->HeightLog2;
   log2Depth  = tObj->Image[0][t->base.firstLevel]->DepthLog2;

   numLevels = t->base.lastLevel - t->base.firstLevel + 1;

   assert(numLevels <= RADEON_MAX_TEXTURE_LEVELS);

   /* Calculate mipmap offsets and dimensions for blitting (uploading)
    * The idea is that we lay out the mipmap levels within a block of
    * memory organized as a rectangle of width BLIT_WIDTH_BYTES.
    */
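   /* A sketch of the layout built below: each level is placed at a 32-byte
    * aligned curOffset within that BLIT_WIDTH_BYTES-wide rectangle, and its
    * blit rectangle is derived from that offset, e.g. in the byte-addressed
    * case
    *
    *    image[0][i].x = curOffset % BLIT_WIDTH_BYTES;
    *    image[0][i].y = curOffset / BLIT_WIDTH_BYTES;
    *
    * so the whole mipmap chain can later be uploaded with blits.
    */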
   curOffset = 0;
   blitWidth = BLIT_WIDTH_BYTES;
   t->tile_bits = 0;

   /* figure out if this texture is suitable for tiling. */
   if (texelBytes && (tObj->Target != GL_TEXTURE_RECTANGLE_NV)) {
      if (rmesa->texmicrotile && (baseImage->Height > 1)) {
         /* Allow at most a 32 (bytes) x 1 mip layout (which will use twice
            the space the non-tiled version would use), and only if the base
            texture is large enough. */
         if ((numLevels == 1) ||
             (((baseImage->Width * texelBytes / baseImage->Height) <= 32) &&
               (baseImage->Width * texelBytes > 64)) ||
              ((baseImage->Width * texelBytes / baseImage->Height) <= 16)) {
            /* R100 has two microtile bits (only the txoffset reg, not the blitter)
               weird: X2 + OPT: 32bit correct, 16bit completely hosed
                      X2:       32bit correct, 16bit correct
                      OPT:      32bit large mips correct, small mips hosed, 16bit completely hosed */
            t->tile_bits |= RADEON_TXO_MICRO_TILE_X2 /*| RADEON_TXO_MICRO_TILE_OPT*/;
         }
      }
      if ((baseImage->Width * texelBytes >= 256) && (baseImage->Height >= 16)) {
         /* The R100, like the r200, seems to disable macro tiling on its own
            only when the mip width is smaller than 256 bytes, and not when the
            height is smaller than 16 (not 100% sure), so we need to disable
            macro tiling ourselves in that case. */
         if ((numLevels == 1) || ((baseImage->Width * texelBytes / baseImage->Height) <= 4)) {
            t->tile_bits |= RADEON_TXO_MACRO_TILE;
         }
      }
   }

   for (i = 0; i < numLevels; i++) {
      const struct gl_texture_image *texImage;
      GLuint size;

      texImage = tObj->Image[0][i + t->base.firstLevel];
      if ( !texImage )
         break;

      /* find image size in bytes */
      if (texImage->IsCompressed) {
         /* We need to calculate the size AFTER padding even though the
            texture is submitted without padding.
            Only handle POT textures currently - don't know if NPOT is even
            possible, the size calculation would certainly need (trivial)
            adjustments.
            Align (and later pad) to 32 bytes; not sure what that 64-byte
            blit width is good for? */
         if ((t->pp_txformat & RADEON_TXFORMAT_FORMAT_MASK) == RADEON_TXFORMAT_DXT1) {
            /* RGB_DXT1/RGBA_DXT1, 8 bytes per block */
            if ((texImage->Width + 3) < 8) /* width one block */
               size = texImage->CompressedSize * 4;
            else if ((texImage->Width + 3) < 16)
               size = texImage->CompressedSize * 2;
            else size = texImage->CompressedSize;
         }
         else /* DXT3/5, 16 bytes per block */
            if ((texImage->Width + 3) < 8)
               size = texImage->CompressedSize * 2;
            else size = texImage->CompressedSize;
      }
      else if (tObj->Target == GL_TEXTURE_RECTANGLE_NV) {
         size = ((texImage->Width * texelBytes + 63) & ~63) * texImage->Height;
      }
      else if (t->tile_bits & RADEON_TXO_MICRO_TILE_X2) {
         /* The tile pattern is 16 bytes x2.  Mipmaps stay 32-byte aligned,
            though the actual offset may differ from the untiled case (if the
            texture is less than 32 bytes wide). */
         int w = (texImage->Width * texelBytes * 2 + 31) & ~31;
         size = (w * ((texImage->Height + 1) / 2)) * texImage->Depth;
         blitWidth = MAX2(texImage->Width, 64 / texelBytes);
      }
      else {
         int w = (texImage->Width * texelBytes + 31) & ~31;
         size = w * texImage->Height * texImage->Depth;
         blitWidth = MAX2(texImage->Width, 64 / texelBytes);
      }
      assert(size > 0);

      /* Align to 32-byte offset.  It is faster to do this unconditionally
       * (no branch penalty).
       */
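      /* (Rounding sketch: adding 0x1f and masking with ~0x1f rounds curOffset
       * up to the next multiple of 32, e.g. 0x21 -> 0x40, while existing
       * multiples of 32 are left unchanged.)
       */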

      curOffset = (curOffset + 0x1f) & ~0x1f;

      if (texelBytes) {
         t->image[0][i].x = curOffset; /* fix x and y coords up later together with offset */
         t->image[0][i].y = 0;
         t->image[0][i].width = MIN2(size / texelBytes, blitWidth);
         t->image[0][i].height = (size / texelBytes) / t->image[0][i].width;
      }
      else {
         t->image[0][i].x = curOffset % BLIT_WIDTH_BYTES;
         t->image[0][i].y = curOffset / BLIT_WIDTH_BYTES;
         t->image[0][i].width = MIN2(size, BLIT_WIDTH_BYTES);
         t->image[0][i].height = size / t->image[0][i].width;
      }

#if 0
      /* for debugging only and only applicable to non-rectangle targets */
      assert(size % t->image[0][i].width == 0);
      assert(t->image[0][i].x == 0
             || (size < BLIT_WIDTH_BYTES && t->image[0][i].height == 1));
#endif

      if (0)
         fprintf(stderr,
                 "level %d: %dx%d x=%d y=%d w=%d h=%d size=%d at %d\n",
                 i, texImage->Width, texImage->Height,
                 t->image[0][i].x, t->image[0][i].y,
                 t->image[0][i].width, t->image[0][i].height, size, curOffset);

      curOffset += size;

   }

   /* Align the total size of texture memory block.
    */
   t->base.totalSize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;

   /* Hardware state:
    */
   t->pp_txfilter &= ~RADEON_MAX_MIP_LEVEL_MASK;
   t->pp_txfilter |= (numLevels - 1) << RADEON_MAX_MIP_LEVEL_SHIFT;

   t->pp_txformat &= ~(RADEON_TXFORMAT_WIDTH_MASK |
                       RADEON_TXFORMAT_HEIGHT_MASK |
                       RADEON_TXFORMAT_CUBIC_MAP_ENABLE);
   t->pp_txformat |= ((log2Width << RADEON_TXFORMAT_WIDTH_SHIFT) |
                      (log2Height << RADEON_TXFORMAT_HEIGHT_SHIFT));

   t->pp_txsize = (((tObj->Image[0][t->base.firstLevel]->Width - 1) << 0) |
                   ((tObj->Image[0][t->base.firstLevel]->Height - 1) << 16));

   /* Only need to round to nearest 32 for textures, but the blitter
    * requires 64-byte aligned pitches, and we may/may not need the
    * blitter.  NPOT only!
    */
   if (baseImage->IsCompressed)
      t->pp_txpitch = (tObj->Image[0][t->base.firstLevel]->Width + 63) & ~(63);
   else
      t->pp_txpitch = ((tObj->Image[0][t->base.firstLevel]->Width * texelBytes) + 63) & ~(63);
   t->pp_txpitch -= 32;

   t->dirty_state = TEX_ALL;

   /* FYI: radeonUploadTexImages( rmesa, t ); used to be called here */
}


/* ================================================================
 * Texture combine functions
 */

/* GL_ARB_texture_env_combine support
 */

/* The color tables have combine functions for GL_SRC_COLOR,
 * GL_ONE_MINUS_SRC_COLOR, GL_SRC_ALPHA and GL_ONE_MINUS_SRC_ALPHA.
 */
static GLuint radeon_texture_color[][RADEON_MAX_TEXTURE_UNITS] =
{
   {
      RADEON_COLOR_ARG_A_T0_COLOR,
      RADEON_COLOR_ARG_A_T1_COLOR,
      RADEON_COLOR_ARG_A_T2_COLOR
   },
   {
      RADEON_COLOR_ARG_A_T0_COLOR | RADEON_COMP_ARG_A,
      RADEON_COLOR_ARG_A_T1_COLOR | RADEON_COMP_ARG_A,
      RADEON_COLOR_ARG_A_T2_COLOR | RADEON_COMP_ARG_A
   },
   {
      RADEON_COLOR_ARG_A_T0_ALPHA,
      RADEON_COLOR_ARG_A_T1_ALPHA,
      RADEON_COLOR_ARG_A_T2_ALPHA
   },
   {
      RADEON_COLOR_ARG_A_T0_ALPHA | RADEON_COMP_ARG_A,
      RADEON_COLOR_ARG_A_T1_ALPHA | RADEON_COMP_ARG_A,
      RADEON_COLOR_ARG_A_T2_ALPHA | RADEON_COMP_ARG_A
   },
};

static GLuint radeon_tfactor_color[] =
{
   RADEON_COLOR_ARG_A_TFACTOR_COLOR,
   RADEON_COLOR_ARG_A_TFACTOR_COLOR | RADEON_COMP_ARG_A,
   RADEON_COLOR_ARG_A_TFACTOR_ALPHA,
   RADEON_COLOR_ARG_A_TFACTOR_ALPHA | RADEON_COMP_ARG_A
};

static GLuint radeon_primary_color[] =
{
   RADEON_COLOR_ARG_A_DIFFUSE_COLOR,
   RADEON_COLOR_ARG_A_DIFFUSE_COLOR | RADEON_COMP_ARG_A,
   RADEON_COLOR_ARG_A_DIFFUSE_ALPHA,
   RADEON_COLOR_ARG_A_DIFFUSE_ALPHA | RADEON_COMP_ARG_A
};

static GLuint radeon_previous_color[] =
{
   RADEON_COLOR_ARG_A_CURRENT_COLOR,
   RADEON_COLOR_ARG_A_CURRENT_COLOR | RADEON_COMP_ARG_A,
   RADEON_COLOR_ARG_A_CURRENT_ALPHA,
   RADEON_COLOR_ARG_A_CURRENT_ALPHA | RADEON_COMP_ARG_A
};

/* GL_ZERO table - indices 0-3
 * GL_ONE table  - indices 1-4
 */
static GLuint radeon_zero_color[] =
{
   RADEON_COLOR_ARG_A_ZERO,
   RADEON_COLOR_ARG_A_ZERO | RADEON_COMP_ARG_A,
   RADEON_COLOR_ARG_A_ZERO,
   RADEON_COLOR_ARG_A_ZERO | RADEON_COMP_ARG_A,
   RADEON_COLOR_ARG_A_ZERO
};
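
/* Note on the GL_ZERO/GL_ONE trick above (it follows directly from the table
 * layout): a GL_ONE source simply indexes one entry further than GL_ZERO, so
 * e.g. GL_ONE with operand GL_SRC_COLOR (op 0) picks entry 1, which is ZERO
 * with the complement bit set, i.e. 1 - 0 = 1, while GL_ONE with
 * GL_ONE_MINUS_SRC_COLOR (op 1) picks entry 2, plain ZERO again.
 */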


/* The alpha tables only have GL_SRC_ALPHA and GL_ONE_MINUS_SRC_ALPHA.
 */
static GLuint radeon_texture_alpha[][RADEON_MAX_TEXTURE_UNITS] =
{
   {
      RADEON_ALPHA_ARG_A_T0_ALPHA,
      RADEON_ALPHA_ARG_A_T1_ALPHA,
      RADEON_ALPHA_ARG_A_T2_ALPHA
   },
   {
      RADEON_ALPHA_ARG_A_T0_ALPHA | RADEON_COMP_ARG_A,
      RADEON_ALPHA_ARG_A_T1_ALPHA | RADEON_COMP_ARG_A,
      RADEON_ALPHA_ARG_A_T2_ALPHA | RADEON_COMP_ARG_A
   },
};

static GLuint radeon_tfactor_alpha[] =
{
   RADEON_ALPHA_ARG_A_TFACTOR_ALPHA,
   RADEON_ALPHA_ARG_A_TFACTOR_ALPHA | RADEON_COMP_ARG_A
};

static GLuint radeon_primary_alpha[] =
{
   RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA,
   RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA | RADEON_COMP_ARG_A
};

static GLuint radeon_previous_alpha[] =
{
   RADEON_ALPHA_ARG_A_CURRENT_ALPHA,
   RADEON_ALPHA_ARG_A_CURRENT_ALPHA | RADEON_COMP_ARG_A
};

/* GL_ZERO table - indices 0-1
 * GL_ONE table  - indices 1-2
 */
static GLuint radeon_zero_alpha[] =
{
   RADEON_ALPHA_ARG_A_ZERO,
   RADEON_ALPHA_ARG_A_ZERO | RADEON_COMP_ARG_A,
   RADEON_ALPHA_ARG_A_ZERO
};


/* Extract the arg from slot A, shift it into the correct argument slot
 * and set the corresponding complement bit.
 */
#define RADEON_COLOR_ARG( n, arg )                     \
do {                                                   \
   color_combine |=                                    \
      ((color_arg[n] & RADEON_COLOR_ARG_MASK)          \
       << RADEON_COLOR_ARG_##arg##_SHIFT);             \
   color_combine |=                                    \
      ((color_arg[n] >> RADEON_COMP_ARG_SHIFT)         \
       << RADEON_COMP_ARG_##arg##_SHIFT);              \
} while (0)

#define RADEON_ALPHA_ARG( n, arg )                     \
do {                                                   \
   alpha_combine |=                                    \
      ((alpha_arg[n] & RADEON_ALPHA_ARG_MASK)          \
       << RADEON_ALPHA_ARG_##arg##_SHIFT);             \
   alpha_combine |=                                    \
      ((alpha_arg[n] >> RADEON_COMP_ARG_SHIFT)         \
       << RADEON_COMP_ARG_##arg##_SHIFT);              \
} while (0)
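
/* For example, RADEON_COLOR_ARG( 0, C ) moves the pre-packed argument in
 * color_arg[0] into the hardware's C slot: the low bits (ARG_MASK) select
 * the source/channel, and the complement flag above RADEON_COMP_ARG_SHIFT
 * is moved to the C position of the combine word written to PP_TXCBLEND.
 */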


/* ================================================================
 * Texture unit state management
 */

static GLboolean radeonUpdateTextureEnv( GLcontext *ctx, int unit )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
   GLuint color_combine, alpha_combine;
   const GLuint color_combine0 = RADEON_COLOR_ARG_A_ZERO | RADEON_COLOR_ARG_B_ZERO
         | RADEON_COLOR_ARG_C_CURRENT_COLOR | RADEON_BLEND_CTL_ADD
         | RADEON_SCALE_1X | RADEON_CLAMP_TX;
   const GLuint alpha_combine0 = RADEON_ALPHA_ARG_A_ZERO | RADEON_ALPHA_ARG_B_ZERO
         | RADEON_ALPHA_ARG_C_CURRENT_ALPHA | RADEON_BLEND_CTL_ADD
         | RADEON_SCALE_1X | RADEON_CLAMP_TX;
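   /* (Inferred from the combine setup below - e.g. GL_MODULATE uses A and B
    * with C zero, GL_ADD uses A and C with B forced to one - the blender
    * effectively computes A*B + C, so these defaults with A = B = ZERO and
    * C = CURRENT simply pass the current color/alpha through when the unit
    * is disabled.)
    */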


   /* texUnit->_Current can be NULL if and only if the texture unit is
    * not actually enabled.
    */
   assert( (texUnit->_ReallyEnabled == 0)
           || (texUnit->_Current != NULL) );

   if ( RADEON_DEBUG & DEBUG_TEXTURE ) {
      fprintf( stderr, "%s( %p, %d )\n", __FUNCTION__, (void *)ctx, unit );
   }

   /* Set the texture environment state.  Isn't this nice and clean?
    * The chip will automagically set the texture alpha to 0xff when
    * the texture format does not include an alpha component.  This
    * reduces the amount of special-casing we have to do, alpha-only
    * textures being a notable exception.
    */
   /* Don't cache these results.
    */
   rmesa->state.texture.unit[unit].format = 0;
   rmesa->state.texture.unit[unit].envMode = 0;

   if ( !texUnit->_ReallyEnabled ) {
      color_combine = color_combine0;
      alpha_combine = alpha_combine0;
   }
   else {
      GLuint color_arg[3], alpha_arg[3];
      GLuint i;
      const GLuint numColorArgs = texUnit->_CurrentCombine->_NumArgsRGB;
      const GLuint numAlphaArgs = texUnit->_CurrentCombine->_NumArgsA;
      GLuint RGBshift = texUnit->_CurrentCombine->ScaleShiftRGB;
      GLuint Ashift = texUnit->_CurrentCombine->ScaleShiftA;


      /* Step 1:
       * Extract the color and alpha combine function arguments.
       */
      for ( i = 0 ; i < numColorArgs ; i++ ) {
         const GLint op = texUnit->_CurrentCombine->OperandRGB[i] - GL_SRC_COLOR;
         const GLuint srcRGBi = texUnit->_CurrentCombine->SourceRGB[i];
         assert(op >= 0);
         assert(op <= 3);
         switch ( srcRGBi ) {
         case GL_TEXTURE:
            color_arg[i] = radeon_texture_color[op][unit];
            break;
         case GL_CONSTANT:
            color_arg[i] = radeon_tfactor_color[op];
            break;
         case GL_PRIMARY_COLOR:
            color_arg[i] = radeon_primary_color[op];
            break;
         case GL_PREVIOUS:
            color_arg[i] = radeon_previous_color[op];
            break;
         case GL_ZERO:
            color_arg[i] = radeon_zero_color[op];
            break;
         case GL_ONE:
            color_arg[i] = radeon_zero_color[op+1];
            break;
         case GL_TEXTURE0:
         case GL_TEXTURE1:
         case GL_TEXTURE2:
            /* Implement the OpenGL 1.4/1.5 core spec here, not the
             * GL_ARB_texture_env_crossbar spec (which would require disabling
             * blending instead of undefined results when a texture unit that
             * is not enabled is referenced). */
            color_arg[i] = radeon_texture_color[op][srcRGBi - GL_TEXTURE0];
            break;
         default:
            return GL_FALSE;
         }
      }

      for ( i = 0 ; i < numAlphaArgs ; i++ ) {
         const GLint op = texUnit->_CurrentCombine->OperandA[i] - GL_SRC_ALPHA;
         const GLuint srcAi = texUnit->_CurrentCombine->SourceA[i];
         assert(op >= 0);
         assert(op <= 1);
         switch ( srcAi ) {
         case GL_TEXTURE:
            alpha_arg[i] = radeon_texture_alpha[op][unit];
            break;
         case GL_CONSTANT:
            alpha_arg[i] = radeon_tfactor_alpha[op];
            break;
         case GL_PRIMARY_COLOR:
            alpha_arg[i] = radeon_primary_alpha[op];
            break;
         case GL_PREVIOUS:
            alpha_arg[i] = radeon_previous_alpha[op];
            break;
         case GL_ZERO:
            alpha_arg[i] = radeon_zero_alpha[op];
            break;
         case GL_ONE:
            alpha_arg[i] = radeon_zero_alpha[op+1];
            break;
         case GL_TEXTURE0:
         case GL_TEXTURE1:
         case GL_TEXTURE2:
            alpha_arg[i] = radeon_texture_alpha[op][srcAi - GL_TEXTURE0];
            break;
         default:
            return GL_FALSE;
         }
      }

      /* Step 2:
       * Build up the color and alpha combine functions.
       */
      switch ( texUnit->_CurrentCombine->ModeRGB ) {
      case GL_REPLACE:
         color_combine = (RADEON_COLOR_ARG_A_ZERO |
                          RADEON_COLOR_ARG_B_ZERO |
                          RADEON_BLEND_CTL_ADD |
                          RADEON_CLAMP_TX);
         RADEON_COLOR_ARG( 0, C );
         break;
      case GL_MODULATE:
         color_combine = (RADEON_COLOR_ARG_C_ZERO |
                          RADEON_BLEND_CTL_ADD |
                          RADEON_CLAMP_TX);
         RADEON_COLOR_ARG( 0, A );
         RADEON_COLOR_ARG( 1, B );
         break;
      case GL_ADD:
         color_combine = (RADEON_COLOR_ARG_B_ZERO |
                          RADEON_COMP_ARG_B |
                          RADEON_BLEND_CTL_ADD |
                          RADEON_CLAMP_TX);
         RADEON_COLOR_ARG( 0, A );
         RADEON_COLOR_ARG( 1, C );
         break;
      case GL_ADD_SIGNED:
         color_combine = (RADEON_COLOR_ARG_B_ZERO |
                          RADEON_COMP_ARG_B |
                          RADEON_BLEND_CTL_ADDSIGNED |
                          RADEON_CLAMP_TX);
         RADEON_COLOR_ARG( 0, A );
         RADEON_COLOR_ARG( 1, C );
         break;
      case GL_SUBTRACT:
         color_combine = (RADEON_COLOR_ARG_B_ZERO |
                          RADEON_COMP_ARG_B |
                          RADEON_BLEND_CTL_SUBTRACT |
                          RADEON_CLAMP_TX);
         RADEON_COLOR_ARG( 0, A );
         RADEON_COLOR_ARG( 1, C );
         break;
      case GL_INTERPOLATE:
         color_combine = (RADEON_BLEND_CTL_BLEND |
                          RADEON_CLAMP_TX);
         RADEON_COLOR_ARG( 0, B );
         RADEON_COLOR_ARG( 1, A );
         RADEON_COLOR_ARG( 2, C );
         break;

      case GL_DOT3_RGB_EXT:
      case GL_DOT3_RGBA_EXT:
         /* The EXT version of the DOT3 extension does not support the
          * scale factor, but the ARB version (and the version in OpenGL
          * 1.3) does.
          */
         RGBshift = 0;
         /* FALLTHROUGH */

      case GL_DOT3_RGB:
      case GL_DOT3_RGBA:
         /* The R100 / RV200 only support a 1X multiplier in hardware
          * w/the ARB version.
          */
         if ( RGBshift != (RADEON_SCALE_1X >> RADEON_SCALE_SHIFT) ) {
            return GL_FALSE;
         }

         RGBshift += 2;
         if ( (texUnit->_CurrentCombine->ModeRGB == GL_DOT3_RGBA_EXT)
              || (texUnit->_CurrentCombine->ModeRGB == GL_DOT3_RGBA) ) {
            /* is it necessary to set this or will it be ignored anyway? */
            Ashift = RGBshift;
         }

         color_combine = (RADEON_COLOR_ARG_C_ZERO |
                          RADEON_BLEND_CTL_DOT3 |
                          RADEON_CLAMP_TX);
         RADEON_COLOR_ARG( 0, A );
         RADEON_COLOR_ARG( 1, B );
         break;

      case GL_MODULATE_ADD_ATI:
         color_combine = (RADEON_BLEND_CTL_ADD |
                          RADEON_CLAMP_TX);
         RADEON_COLOR_ARG( 0, A );
         RADEON_COLOR_ARG( 1, C );
         RADEON_COLOR_ARG( 2, B );
         break;
      case GL_MODULATE_SIGNED_ADD_ATI:
         color_combine = (RADEON_BLEND_CTL_ADDSIGNED |
                          RADEON_CLAMP_TX);
         RADEON_COLOR_ARG( 0, A );
         RADEON_COLOR_ARG( 1, C );
         RADEON_COLOR_ARG( 2, B );
         break;
      case GL_MODULATE_SUBTRACT_ATI:
         color_combine = (RADEON_BLEND_CTL_SUBTRACT |
                          RADEON_CLAMP_TX);
         RADEON_COLOR_ARG( 0, A );
         RADEON_COLOR_ARG( 1, C );
         RADEON_COLOR_ARG( 2, B );
         break;
      default:
         return GL_FALSE;
      }

      switch ( texUnit->_CurrentCombine->ModeA ) {
      case GL_REPLACE:
         alpha_combine = (RADEON_ALPHA_ARG_A_ZERO |
                          RADEON_ALPHA_ARG_B_ZERO |
                          RADEON_BLEND_CTL_ADD |
                          RADEON_CLAMP_TX);
         RADEON_ALPHA_ARG( 0, C );
         break;
      case GL_MODULATE:
         alpha_combine = (RADEON_ALPHA_ARG_C_ZERO |
                          RADEON_BLEND_CTL_ADD |
                          RADEON_CLAMP_TX);
         RADEON_ALPHA_ARG( 0, A );
         RADEON_ALPHA_ARG( 1, B );
         break;
      case GL_ADD:
         alpha_combine = (RADEON_ALPHA_ARG_B_ZERO |
                          RADEON_COMP_ARG_B |
                          RADEON_BLEND_CTL_ADD |
                          RADEON_CLAMP_TX);
         RADEON_ALPHA_ARG( 0, A );
         RADEON_ALPHA_ARG( 1, C );
         break;
      case GL_ADD_SIGNED:
         alpha_combine = (RADEON_ALPHA_ARG_B_ZERO |
                          RADEON_COMP_ARG_B |
                          RADEON_BLEND_CTL_ADDSIGNED |
                          RADEON_CLAMP_TX);
         RADEON_ALPHA_ARG( 0, A );
         RADEON_ALPHA_ARG( 1, C );
         break;
      case GL_SUBTRACT:
         alpha_combine = (RADEON_ALPHA_ARG_B_ZERO |
                          RADEON_COMP_ARG_B |
                          RADEON_BLEND_CTL_SUBTRACT |
                          RADEON_CLAMP_TX);
         RADEON_ALPHA_ARG( 0, A );
         RADEON_ALPHA_ARG( 1, C );
         break;
      case GL_INTERPOLATE:
         alpha_combine = (RADEON_BLEND_CTL_BLEND |
                          RADEON_CLAMP_TX);
         RADEON_ALPHA_ARG( 0, B );
         RADEON_ALPHA_ARG( 1, A );
         RADEON_ALPHA_ARG( 2, C );
         break;

      case GL_MODULATE_ADD_ATI:
         alpha_combine = (RADEON_BLEND_CTL_ADD |
                          RADEON_CLAMP_TX);
         RADEON_ALPHA_ARG( 0, A );
         RADEON_ALPHA_ARG( 1, C );
         RADEON_ALPHA_ARG( 2, B );
         break;
      case GL_MODULATE_SIGNED_ADD_ATI:
         alpha_combine = (RADEON_BLEND_CTL_ADDSIGNED |
                          RADEON_CLAMP_TX);
         RADEON_ALPHA_ARG( 0, A );
         RADEON_ALPHA_ARG( 1, C );
         RADEON_ALPHA_ARG( 2, B );
         break;
      case GL_MODULATE_SUBTRACT_ATI:
         alpha_combine = (RADEON_BLEND_CTL_SUBTRACT |
                          RADEON_CLAMP_TX);
         RADEON_ALPHA_ARG( 0, A );
         RADEON_ALPHA_ARG( 1, C );
         RADEON_ALPHA_ARG( 2, B );
         break;
      default:
         return GL_FALSE;
      }

      if ( (texUnit->_CurrentCombine->ModeRGB == GL_DOT3_RGB_EXT)
           || (texUnit->_CurrentCombine->ModeRGB == GL_DOT3_RGB) ) {
         alpha_combine |= RADEON_DOT_ALPHA_DONT_REPLICATE;
      }

      /* Step 3:
       * Apply the scale factor.
       */
      color_combine |= (RGBshift << RADEON_SCALE_SHIFT);
      alpha_combine |= (Ashift << RADEON_SCALE_SHIFT);

      /* All done!
       */
   }

   if ( rmesa->hw.tex[unit].cmd[TEX_PP_TXCBLEND] != color_combine ||
        rmesa->hw.tex[unit].cmd[TEX_PP_TXABLEND] != alpha_combine ) {
      RADEON_STATECHANGE( rmesa, tex[unit] );
      rmesa->hw.tex[unit].cmd[TEX_PP_TXCBLEND] = color_combine;
      rmesa->hw.tex[unit].cmd[TEX_PP_TXABLEND] = alpha_combine;
   }

   return GL_TRUE;
}

#define TEXOBJ_TXFILTER_MASK (RADEON_MAX_MIP_LEVEL_MASK | \
                              RADEON_MIN_FILTER_MASK | \
                              RADEON_MAG_FILTER_MASK | \
                              RADEON_MAX_ANISO_MASK | \
                              RADEON_YUV_TO_RGB | \
                              RADEON_YUV_TEMPERATURE_MASK | \
                              RADEON_CLAMP_S_MASK | \
                              RADEON_CLAMP_T_MASK | \
                              RADEON_BORDER_MODE_D3D )

#define TEXOBJ_TXFORMAT_MASK (RADEON_TXFORMAT_WIDTH_MASK | \
                              RADEON_TXFORMAT_HEIGHT_MASK | \
                              RADEON_TXFORMAT_FORMAT_MASK | \
                              RADEON_TXFORMAT_F5_WIDTH_MASK | \
                              RADEON_TXFORMAT_F5_HEIGHT_MASK | \
                              RADEON_TXFORMAT_ALPHA_IN_MAP | \
                              RADEON_TXFORMAT_CUBIC_MAP_ENABLE | \
                              RADEON_TXFORMAT_NON_POWER2)


static void import_tex_obj_state( radeonContextPtr rmesa,
                                  int unit,
                                  radeonTexObjPtr texobj )
{
   GLuint *cmd = RADEON_DB_STATE( tex[unit] );

   cmd[TEX_PP_TXFILTER] &= ~TEXOBJ_TXFILTER_MASK;
   cmd[TEX_PP_TXFILTER] |= texobj->pp_txfilter & TEXOBJ_TXFILTER_MASK;
   cmd[TEX_PP_TXFORMAT] &= ~TEXOBJ_TXFORMAT_MASK;
   cmd[TEX_PP_TXFORMAT] |= texobj->pp_txformat & TEXOBJ_TXFORMAT_MASK;
   cmd[TEX_PP_TXOFFSET] = texobj->pp_txoffset;
   cmd[TEX_PP_BORDER_COLOR] = texobj->pp_border_color;
   RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.tex[unit] );

   if (texobj->base.tObj->Target == GL_TEXTURE_RECTANGLE_NV) {
      GLuint *txr_cmd = RADEON_DB_STATE( txr[unit] );
      txr_cmd[TXR_PP_TEX_SIZE] = texobj->pp_txsize; /* NPOT only! */
      txr_cmd[TXR_PP_TEX_PITCH] = texobj->pp_txpitch; /* NPOT only! */
      RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.txr[unit] );
   }

   texobj->dirty_state &= ~(1<<unit);
}




static void set_texgen_matrix( radeonContextPtr rmesa,
                               GLuint unit,
                               const GLfloat *s_plane,
                               const GLfloat *t_plane )
{
   static const GLfloat scale_identity[4] = { 1,1,1,1 };

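   /* Mesa matrices are stored column-major, so m[0], m[4], m[8], m[12] below
    * form the first row: loading s_plane there and t_plane into the second
    * row makes the texgen matrix produce S = dot(s_plane, input) and
    * T = dot(t_plane, input).
    */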
   if (!TEST_EQ_4V( s_plane, scale_identity) ||
       !TEST_EQ_4V( t_plane, scale_identity)) {
      rmesa->TexGenEnabled |= RADEON_TEXMAT_0_ENABLE<<unit;
      rmesa->TexGenMatrix[unit].m[0]  = s_plane[0];
      rmesa->TexGenMatrix[unit].m[4]  = s_plane[1];
      rmesa->TexGenMatrix[unit].m[8]  = s_plane[2];
      rmesa->TexGenMatrix[unit].m[12] = s_plane[3];

      rmesa->TexGenMatrix[unit].m[1]  = t_plane[0];
      rmesa->TexGenMatrix[unit].m[5]  = t_plane[1];
      rmesa->TexGenMatrix[unit].m[9]  = t_plane[2];
      rmesa->TexGenMatrix[unit].m[13] = t_plane[3];
      rmesa->NewGLState |= _NEW_TEXTURE_MATRIX;
   }
}

/* Ignoring the Q texcoord for now.
 *
 * Returns GL_FALSE if fallback required.
 */
static GLboolean radeon_validate_texgen( GLcontext *ctx, GLuint unit )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
   GLuint inputshift = RADEON_TEXGEN_0_INPUT_SHIFT + unit*4;
   GLuint tmp = rmesa->TexGenEnabled;

   rmesa->TexGenEnabled &= ~(RADEON_TEXGEN_TEXMAT_0_ENABLE<<unit);
   rmesa->TexGenEnabled &= ~(RADEON_TEXMAT_0_ENABLE<<unit);
   rmesa->TexGenEnabled &= ~(RADEON_TEXGEN_INPUT_MASK<<inputshift);
   rmesa->TexGenNeedNormals[unit] = 0;

   if ((texUnit->TexGenEnabled & (S_BIT|T_BIT)) == 0) {
      /* Disabled, no fallback:
       */
      rmesa->TexGenEnabled |=
         (RADEON_TEXGEN_INPUT_TEXCOORD_0+unit) << inputshift;
      return GL_TRUE;
   }
   else if (texUnit->TexGenEnabled & Q_BIT) {
      /* Very easy to do this, in fact would remove a fallback case
       * elsewhere, but I haven't done it yet...  Fallback:
       */
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "fallback Q_BIT\n");
      return GL_FALSE;
   }
   else if ((texUnit->TexGenEnabled & (S_BIT|T_BIT)) != (S_BIT|T_BIT) ||
            texUnit->GenModeS != texUnit->GenModeT) {
      /* Mixed modes, fallback:
       */
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "fallback mixed texgen\n");
      return GL_FALSE;
   }
   else
      rmesa->TexGenEnabled |= RADEON_TEXGEN_TEXMAT_0_ENABLE << unit;

   switch (texUnit->GenModeS) {
   case GL_OBJECT_LINEAR:
      rmesa->TexGenEnabled |= RADEON_TEXGEN_INPUT_OBJ << inputshift;
      set_texgen_matrix( rmesa, unit,
                         texUnit->ObjectPlaneS,
                         texUnit->ObjectPlaneT);
      break;

   case GL_EYE_LINEAR:
      rmesa->TexGenEnabled |= RADEON_TEXGEN_INPUT_EYE << inputshift;
      set_texgen_matrix( rmesa, unit,
                         texUnit->EyePlaneS,
                         texUnit->EyePlaneT);
      break;

   case GL_REFLECTION_MAP_NV:
      rmesa->TexGenNeedNormals[unit] = GL_TRUE;
      rmesa->TexGenEnabled |= RADEON_TEXGEN_INPUT_EYE_REFLECT<<inputshift;
      break;

   case GL_NORMAL_MAP_NV:
      rmesa->TexGenNeedNormals[unit] = GL_TRUE;
      rmesa->TexGenEnabled |= RADEON_TEXGEN_INPUT_EYE_NORMAL<<inputshift;
      break;

   case GL_SPHERE_MAP:
   default:
      /* Unsupported mode, fallback:
       */
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "fallback GL_SPHERE_MAP\n");
      return GL_FALSE;
   }

   if (tmp != rmesa->TexGenEnabled) {
      rmesa->NewGLState |= _NEW_TEXTURE_MATRIX;
   }

   return GL_TRUE;
}


static void disable_tex( GLcontext *ctx, int unit )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

   if (rmesa->hw.ctx.cmd[CTX_PP_CNTL] & (RADEON_TEX_0_ENABLE<<unit)) {
      /* Texture unit disabled */
      if ( rmesa->state.texture.unit[unit].texobj != NULL ) {
         /* The old texture is no longer bound to this texture unit.
          * Mark it as such.
          */

         rmesa->state.texture.unit[unit].texobj->base.bound &= ~(1UL << unit);
         rmesa->state.texture.unit[unit].texobj = NULL;
      }

      RADEON_STATECHANGE( rmesa, ctx );
      rmesa->hw.ctx.cmd[CTX_PP_CNTL] &=
         ~((RADEON_TEX_0_ENABLE | RADEON_TEX_BLEND_0_ENABLE) << unit);

      RADEON_STATECHANGE( rmesa, tcl );
      switch (unit) {
      case 0:
         rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] &= ~(RADEON_TCL_VTX_ST0 |
                                                   RADEON_TCL_VTX_Q0);
         break;
      case 1:
         rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] &= ~(RADEON_TCL_VTX_ST1 |
                                                   RADEON_TCL_VTX_Q1);
         break;
      default:
         break;
      }


      if (rmesa->TclFallback & (RADEON_TCL_FALLBACK_TEXGEN_0<<unit)) {
         TCL_FALLBACK( ctx, (RADEON_TCL_FALLBACK_TEXGEN_0<<unit), GL_FALSE);
         rmesa->recheck_texgen[unit] = GL_TRUE;
      }


      {
         GLuint inputshift = RADEON_TEXGEN_0_INPUT_SHIFT + unit*4;
         GLuint tmp = rmesa->TexGenEnabled;

         rmesa->TexGenEnabled &= ~(RADEON_TEXGEN_TEXMAT_0_ENABLE<<unit);
         rmesa->TexGenEnabled &= ~(RADEON_TEXMAT_0_ENABLE<<unit);
         rmesa->TexGenEnabled &= ~(RADEON_TEXGEN_INPUT_MASK<<inputshift);
         rmesa->TexGenNeedNormals[unit] = 0;
         rmesa->TexGenEnabled |=
            (RADEON_TEXGEN_INPUT_TEXCOORD_0+unit) << inputshift;

         if (tmp != rmesa->TexGenEnabled) {
            rmesa->recheck_texgen[unit] = GL_TRUE;
            rmesa->NewGLState |= _NEW_TEXTURE_MATRIX;
         }
      }
   }
}

static GLboolean enable_tex_2d( GLcontext *ctx, int unit )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
   struct gl_texture_object *tObj = texUnit->_Current;
   radeonTexObjPtr t = (radeonTexObjPtr) tObj->DriverData;

   /* Need to load the 2d images associated with this unit.
    */
   if (t->pp_txformat & RADEON_TXFORMAT_NON_POWER2) {
      t->pp_txformat &= ~RADEON_TXFORMAT_NON_POWER2;
      t->base.dirty_images[0] = ~0;
   }

   ASSERT(tObj->Target == GL_TEXTURE_2D || tObj->Target == GL_TEXTURE_1D);

   if ( t->base.dirty_images[0] ) {
      RADEON_FIREVERTICES( rmesa );
      radeonSetTexImages( rmesa, tObj );
      radeonUploadTexImages( rmesa, (radeonTexObjPtr) tObj->DriverData, 0 );
      if ( !t->base.memBlock )
         return GL_FALSE;
   }

   return GL_TRUE;
}

static GLboolean enable_tex_rect( GLcontext *ctx, int unit )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
   struct gl_texture_object *tObj = texUnit->_Current;
   radeonTexObjPtr t = (radeonTexObjPtr) tObj->DriverData;

   if (!(t->pp_txformat & RADEON_TXFORMAT_NON_POWER2)) {
      t->pp_txformat |= RADEON_TXFORMAT_NON_POWER2;
      t->base.dirty_images[0] = ~0;
   }

   ASSERT(tObj->Target == GL_TEXTURE_RECTANGLE_NV);

   if ( t->base.dirty_images[0] ) {
      RADEON_FIREVERTICES( rmesa );
      radeonSetTexImages( rmesa, tObj );
      radeonUploadTexImages( rmesa, (radeonTexObjPtr) tObj->DriverData, 0 );
      if ( !t->base.memBlock /* && !rmesa->prefer_gart_client_texturing FIXME */ ) {
         fprintf(stderr, "%s: upload failed\n", __FUNCTION__);
         return GL_FALSE;
      }
   }

   return GL_TRUE;
}


static GLboolean update_tex_common( GLcontext *ctx, int unit )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
   struct gl_texture_object *tObj = texUnit->_Current;
   radeonTexObjPtr t = (radeonTexObjPtr) tObj->DriverData;
   GLenum format;

   /* Fallback if there's a texture border */
   if ( tObj->Image[0][tObj->BaseLevel]->Border > 0 ) {
      fprintf(stderr, "%s: border\n", __FUNCTION__);
      return GL_FALSE;
   }

   /* Update state if this is a different texture object to last
    * time.
    */
   if ( rmesa->state.texture.unit[unit].texobj != t ) {
      if ( rmesa->state.texture.unit[unit].texobj != NULL ) {
         /* The old texture is no longer bound to this texture unit.
          * Mark it as such.
          */

         rmesa->state.texture.unit[unit].texobj->base.bound &=
            ~(1UL << unit);
      }

      rmesa->state.texture.unit[unit].texobj = t;
      t->base.bound |= (1UL << unit);
      t->dirty_state |= 1<<unit;
      driUpdateTextureLRU( (driTextureObject *) t ); /* XXX: should be locked! */
   }


   /* Newly enabled?
    */
   if ( !(rmesa->hw.ctx.cmd[CTX_PP_CNTL] & (RADEON_TEX_0_ENABLE<<unit))) {
      RADEON_STATECHANGE( rmesa, ctx );
      rmesa->hw.ctx.cmd[CTX_PP_CNTL] |=
         (RADEON_TEX_0_ENABLE | RADEON_TEX_BLEND_0_ENABLE) << unit;

      RADEON_STATECHANGE( rmesa, tcl );

      if (unit == 0)
         rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] |= RADEON_TCL_VTX_ST0;
      else
         rmesa->hw.tcl.cmd[TCL_OUTPUT_VTXFMT] |= RADEON_TCL_VTX_ST1;

      rmesa->recheck_texgen[unit] = GL_TRUE;
   }

   if (t->dirty_state & (1<<unit)) {
      import_tex_obj_state( rmesa, unit, t );
   }

   if (rmesa->recheck_texgen[unit]) {
      GLboolean fallback = !radeon_validate_texgen( ctx, unit );
      TCL_FALLBACK( ctx, (RADEON_TCL_FALLBACK_TEXGEN_0<<unit), fallback);
      rmesa->recheck_texgen[unit] = 0;
      rmesa->NewGLState |= _NEW_TEXTURE_MATRIX;
   }

   format = tObj->Image[0][tObj->BaseLevel]->Format;
   if ( rmesa->state.texture.unit[unit].format != format ||
        rmesa->state.texture.unit[unit].envMode != texUnit->EnvMode ) {
      rmesa->state.texture.unit[unit].format = format;
      rmesa->state.texture.unit[unit].envMode = texUnit->EnvMode;
      if ( !radeonUpdateTextureEnv( ctx, unit ) ) {
         return GL_FALSE;
      }
   }

   FALLBACK( rmesa, RADEON_FALLBACK_BORDER_MODE, t->border_fallback );
   return !t->border_fallback;
}



static GLboolean radeonUpdateTextureUnit( GLcontext *ctx, int unit )
{
   struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];

   TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_TEXRECT_0 << unit, 0 );

   if ( texUnit->_ReallyEnabled & (TEXTURE_RECT_BIT) ) {
      TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_TEXRECT_0 << unit, 1 );

      return (enable_tex_rect( ctx, unit ) &&
              update_tex_common( ctx, unit ));
   }
   else if ( texUnit->_ReallyEnabled & (TEXTURE_1D_BIT | TEXTURE_2D_BIT) ) {
      return (enable_tex_2d( ctx, unit ) &&
              update_tex_common( ctx, unit ));
   }
   else if ( texUnit->_ReallyEnabled ) {
      return GL_FALSE;
   }
   else {
      disable_tex( ctx, unit );
      return GL_TRUE;
   }
}

void radeonUpdateTextureState( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   GLboolean ok;

   ok = (radeonUpdateTextureUnit( ctx, 0 ) &&
         radeonUpdateTextureUnit( ctx, 1 ));

   FALLBACK( rmesa, RADEON_FALLBACK_TEXTURE, !ok );

   if (rmesa->TclFallback)
      radeonChooseVertexState( ctx );
}