fix handling of textures with a base internal format that does not have all four...
authorRoland Scheidegger <rscheidegger@gmx.ch>
Sun, 15 Oct 2006 21:47:56 +0000 (21:47 +0000)
committerRoland Scheidegger <rscheidegger@gmx.ch>
Sun, 15 Oct 2006 21:47:56 +0000 (21:47 +0000)
src/mesa/drivers/dri/r200/r200_tex.c
src/mesa/drivers/dri/radeon/radeon_texstate.c

index 2cfbf3510b990a26f03d2766b6874039fda1ab7d..6c6450c681ff858931ebda1504675a7ecc3cd992 100644 (file)
@@ -405,7 +405,9 @@ r200ChooseTextureFormat( GLcontext *ctx, GLint internalFormat,
    case GL_ALPHA12:
    case GL_ALPHA16:
    case GL_COMPRESSED_ALPHA:
-      return _dri_texformat_a8;
+   /* can't use a8 format since interpreting hw I8 as a8 would result
+      in wrong rgb values (same as alpha value instead of 0). */
+      return _dri_texformat_al88;
 
    case 1:
    case GL_LUMINANCE:
index 1e3a3951e2dcf657d048e139b538c02da9511e32..cfa8d4c9fa2b29a95871245adb7e5e2cc8c5b821 100644 (file)
@@ -523,9 +523,10 @@ static GLboolean radeonUpdateTextureEnv( GLcontext *ctx, int unit )
 
    /* Set the texture environment state.  Isn't this nice and clean?
     * The chip will automagically set the texture alpha to 0xff when
-    * the texture format does not include an alpha component.  This
+    * the texture format does not include an alpha component. This
     * reduces the amount of special-casing we have to do, alpha-only
-    * textures being a notable exception.
+    * textures being a notable exception. Doesn't work for luminance
+    * textures realized with I8 and ALPHA_IN_MAP not set either (on r100).
     */
     /* Don't cache these results.
     */
@@ -555,7 +556,10 @@ static GLboolean radeonUpdateTextureEnv( GLcontext *ctx, int unit )
         assert(op <= 3);
         switch ( srcRGBi ) {
         case GL_TEXTURE:
-           color_arg[i] = radeon_texture_color[op][unit];
+           if (texUnit->_Current->Image[0][0]->_BaseFormat == GL_ALPHA)
+              color_arg[i] = radeon_zero_color[op];
+           else
+              color_arg[i] = radeon_texture_color[op][unit];
            break;
         case GL_CONSTANT:
            color_arg[i] = radeon_tfactor_color[op];
@@ -574,12 +578,17 @@ static GLboolean radeonUpdateTextureEnv( GLcontext *ctx, int unit )
            break;
         case GL_TEXTURE0:
         case GL_TEXTURE1:
-        case GL_TEXTURE2:
+        case GL_TEXTURE2: {
+           GLuint txunit = srcRGBi - GL_TEXTURE0;
+           if (ctx->Texture.Unit[txunit]._Current->Image[0][0]->_BaseFormat == GL_ALPHA)
+              color_arg[i] = radeon_zero_color[op];
+           else
         /* implement ogl 1.4/1.5 core spec here, not specification of
          * GL_ARB_texture_env_crossbar (which would require disabling blending
          * instead of undefined results when referencing not enabled texunit) */
-          color_arg[i] = radeon_texture_color[op][srcRGBi - GL_TEXTURE0];
-          break;
+             color_arg[i] = radeon_texture_color[op][txunit];
+           }
+           break;
         default:
            return GL_FALSE;
         }
@@ -592,7 +601,10 @@ static GLboolean radeonUpdateTextureEnv( GLcontext *ctx, int unit )
         assert(op <= 1);
         switch ( srcAi ) {
         case GL_TEXTURE:
-           alpha_arg[i] = radeon_texture_alpha[op][unit];
+           if (texUnit->_Current->Image[0][0]->_BaseFormat == GL_LUMINANCE)
+              alpha_arg[i] = radeon_zero_alpha[op+1];
+           else
+              alpha_arg[i] = radeon_texture_alpha[op][unit];
            break;
         case GL_CONSTANT:
            alpha_arg[i] = radeon_tfactor_alpha[op];
@@ -611,9 +623,14 @@ static GLboolean radeonUpdateTextureEnv( GLcontext *ctx, int unit )
            break;
         case GL_TEXTURE0:
         case GL_TEXTURE1:
-        case GL_TEXTURE2:
-          alpha_arg[i] = radeon_texture_alpha[op][srcAi - GL_TEXTURE0];
-          break;
+        case GL_TEXTURE2: {
+           GLuint txunit = srcAi - GL_TEXTURE0;
+           if (ctx->Texture.Unit[txunit]._Current->Image[0][0]->_BaseFormat == GL_LUMINANCE)
+              alpha_arg[i] = radeon_zero_alpha[op+1];
+           else
+              alpha_arg[i] = radeon_texture_alpha[op][txunit];
+           }
+           break;
         default:
            return GL_FALSE;
         }