Properly compute render_inputs_bitset when using a vertex program/shader.
author: Brian <brian@yutani.localnet.net>
Mon, 19 Mar 2007 20:44:15 +0000 (14:44 -0600)
committer: Brian <brian@yutani.localnet.net>
Mon, 19 Mar 2007 20:44:15 +0000 (14:44 -0600)
This fixes a performance regression introduced early in glsl-compiler-1 work.

src/mesa/tnl/t_context.c

index 3b2f91acbaf8a86d5e817869b2bc80159ec78ac5..f665485f422a77bf2ae5274f344eef41944d0ec4 100644 (file)
@@ -150,13 +150,19 @@ _tnl_InvalidateState( GLcontext *ctx, GLuint new_state )
        (ctx->VertexProgram._Enabled && ctx->VertexProgram.PointSizeEnabled))
       RENDERINPUTS_SET( tnl->render_inputs_bitset, _TNL_ATTRIB_POINTSIZE );
 
-#if 1 /* XXX NEW_SLANG */
-   RENDERINPUTS_SET_RANGE( tnl->render_inputs_bitset,
-                           _TNL_FIRST_GENERIC, _TNL_LAST_GENERIC );
-#else
-   if (ctx->ShaderObjects._VertexShaderPresent || ctx->ShaderObjects._FragmentShaderPresent)
-      RENDERINPUTS_SET_RANGE( tnl->render_inputs_bitset, _TNL_FIRST_GENERIC, _TNL_LAST_GENERIC );
-#endif
+   /* check for varying vars which are written by the vertex program */
+   {
+      struct gl_vertex_program *vp = ctx->VertexProgram._Current;
+      if (vp) {
+         GLuint i;
+         for (i = 0; i < MAX_VARYING; i++) {
+            if (vp->Base.OutputsWritten & (1 << (VERT_RESULT_VAR0 + i))) {
+               RENDERINPUTS_SET(tnl->render_inputs_bitset,
+                                _TNL_ATTRIB_GENERIC(i));
+            }
+         }
+      }
+   }
 }