{ NULL, NULL }
};
-extern const struct tnl_pipeline_stage _radeon_texrect_stage;
extern const struct tnl_pipeline_stage _radeon_render_stage;
extern const struct tnl_pipeline_stage _radeon_tcl_stage;
&_tnl_texgen_stage,
&_tnl_texture_transform_stage,
- /* Scale texture rectangle to 0..1.
- */
- &_radeon_texrect_stage,
-
&_radeon_render_stage,
&_tnl_render_stage, /* FALLBACK: */
NULL,
* Deferred state management - matrices, textures, other?
*/
-static void texmat_set_texrect( radeonContextPtr rmesa,
- struct gl_texture_object *tObj, GLuint unit )
-{
- const struct gl_texture_image *baseImage = tObj->Image[0][tObj->BaseLevel];
- _math_matrix_set_identity( &rmesa->tmpmat[unit] );
- rmesa->tmpmat[unit].m[0] = 1.0 / baseImage->Width;
- rmesa->tmpmat[unit].m[5] = 1.0 / baseImage->Height;
-
-}
-
-static void texmat_fixup_texrect( radeonContextPtr rmesa,
- struct gl_texture_object *tObj, GLuint unit )
-{
- const struct gl_texture_image *baseImage = tObj->Image[0][tObj->BaseLevel];
- GLuint i;
- for (i = 0; i < 4; i++) {
- rmesa->tmpmat[unit].m[i] = rmesa->tmpmat[unit].m[i] / baseImage->Width;
- rmesa->tmpmat[unit].m[i+4] = rmesa->tmpmat[unit].m[i+4] / baseImage->Height;
- }
-}
-
void radeonUploadTexMatrix( radeonContextPtr rmesa,
int unit, GLboolean swapcols )
_math_matrix_copy( &rmesa->tmpmat[unit], &rmesa->TexGenMatrix[unit] );
needMatrix = GL_TRUE;
}
- if (ctx->Texture.Unit[unit]._ReallyEnabled == TEXTURE_RECT_BIT) {
- texMatEnabled |= (RADEON_TEXGEN_TEXMAT_0_ENABLE |
- RADEON_TEXMAT_0_ENABLE) << unit;
- if (needMatrix)
- texmat_fixup_texrect( rmesa, ctx->Texture.Unit[unit]._Current, unit );
- else
- texmat_set_texrect( rmesa, ctx->Texture.Unit[unit]._Current, unit );
- needMatrix = GL_TRUE;
- }
if (needMatrix) {
rmesa->NeedTexMatrix |= 1 << unit;
radeonUploadTexMatrix( rmesa, unit,
radeonContextPtr rmesa = RADEON_CONTEXT( ctx );
TNLcontext *tnl = TNL_CONTEXT(ctx);
- GLuint se_coord_fmt;
+ GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];
+
+ se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
+ RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
+ RADEON_VTX_W0_IS_NOT_1_OVER_W0);
/* We must ensure that we don't do _tnl_need_projected_coords while in a
* rasterization fallback. As this function will be called again when we
!RENDERINPUTS_TEST( tnl->render_inputs_bitset, _TNL_ATTRIB_COLOR1 ))
|| (ctx->_TriangleCaps & (DD_TRI_LIGHT_TWOSIDE|DD_TRI_UNFILLED))) {
rmesa->swtcl.needproj = GL_TRUE;
- se_coord_fmt = (RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
- RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
- RADEON_TEX1_W_ROUTING_USE_Q1);
+ se_coord_fmt |= (RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
+ RADEON_VTX_Z_PRE_MULT_1_OVER_W0);
}
else {
rmesa->swtcl.needproj = GL_FALSE;
- se_coord_fmt = (RADEON_VTX_W0_IS_NOT_1_OVER_W0 |
- RADEON_TEX1_W_ROUTING_USE_Q1);
+ se_coord_fmt |= (RADEON_VTX_W0_IS_NOT_1_OVER_W0);
}
_tnl_need_projected_coords( ctx, rmesa->swtcl.needproj );
-
const struct tnl_pipeline_stage _radeon_render_stage =
{
"radeon render",
};
-/**************************************************************************/
-
-/* Radeon texture rectangle expects coords in 0..1 range, not 0..dimension
- * as in the extension spec. Need to translate here.
- *
- * Note that swrast expects 0..dimension, so if a fallback is active,
- * don't do anything. (Maybe need to configure swrast to match hw)
- */
-struct texrect_stage_data {
- GLvector4f texcoord[MAX_TEXTURE_UNITS];
-};
-
-#define TEXRECT_STAGE_DATA(stage) ((struct texrect_stage_data *)stage->privatePtr)
-
-
-static GLboolean run_texrect_stage( GLcontext *ctx,
- struct tnl_pipeline_stage *stage )
-{
- struct texrect_stage_data *store = TEXRECT_STAGE_DATA(stage);
- radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
- TNLcontext *tnl = TNL_CONTEXT(ctx);
- struct vertex_buffer *VB = &tnl->vb;
- GLuint i;
-
- if (rmesa->Fallback)
- return GL_TRUE;
-
- for (i = 0 ; i < ctx->Const.MaxTextureUnits ; i++) {
- if (ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_RECT_BIT) {
- struct gl_texture_object *texObj = ctx->Texture.Unit[i].CurrentRect;
- struct gl_texture_image *texImage = texObj->Image[0][texObj->BaseLevel];
- const GLfloat iw = 1.0/texImage->Width;
- const GLfloat ih = 1.0/texImage->Height;
- GLfloat *in = (GLfloat *)VB->TexCoordPtr[i]->data;
- GLint instride = VB->TexCoordPtr[i]->stride;
- GLfloat (*out)[4] = store->texcoord[i].data;
- GLint j;
-
- store->texcoord[i].size = VB->TexCoordPtr[i]->size;
- for (j = 0 ; j < VB->Count ; j++) {
- switch (VB->TexCoordPtr[i]->size) {
- case 4:
- out[j][3] = in[3];
- /* fallthrough */
- case 3:
- out[j][2] = in[2];
- /* fallthrough */
- default:
- out[j][0] = in[0] * iw;
- out[j][1] = in[1] * ih;
- }
- in = (GLfloat *)((GLubyte *)in + instride);
- }
-
- VB->AttribPtr[VERT_ATTRIB_TEX0+i] = VB->TexCoordPtr[i] = &store->texcoord[i];
- }
- }
-
- return GL_TRUE;
-}
-
-
-/* Called the first time stage->run() is invoked.
- */
-static GLboolean alloc_texrect_data( GLcontext *ctx,
- struct tnl_pipeline_stage *stage )
-{
- struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
- struct texrect_stage_data *store;
- GLuint i;
-
- stage->privatePtr = CALLOC(sizeof(*store));
- store = TEXRECT_STAGE_DATA(stage);
- if (!store)
- return GL_FALSE;
-
- for (i = 0 ; i < ctx->Const.MaxTextureUnits ; i++)
- _mesa_vector4f_alloc( &store->texcoord[i], 0, VB->Size, 32 );
-
- return GL_TRUE;
-}
-
-static void free_texrect_data( struct tnl_pipeline_stage *stage )
-{
- struct texrect_stage_data *store = TEXRECT_STAGE_DATA(stage);
- GLuint i;
-
- if (store) {
- for (i = 0 ; i < MAX_TEXTURE_UNITS ; i++)
- if (store->texcoord[i].data)
- _mesa_vector4f_free( &store->texcoord[i] );
- FREE( store );
- stage->privatePtr = NULL;
- }
-}
-
-const struct tnl_pipeline_stage _radeon_texrect_stage =
-{
- "radeon texrect stage", /* name */
- NULL,
- alloc_texrect_data,
- free_texrect_data,
- NULL,
- run_texrect_stage
-};
-
-
/**************************************************************************/
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
- GLuint se_coord_fmt = (RADEON_VTX_W0_IS_NOT_1_OVER_W0 |
- RADEON_TEX1_W_ROUTING_USE_Q1);
+ GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];
+
+ se_coord_fmt &= ~(RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
+ RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
+ RADEON_VTX_W0_IS_NOT_1_OVER_W0);
+ se_coord_fmt |= RADEON_VTX_W0_IS_NOT_1_OVER_W0;
if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
RADEON_STATECHANGE( rmesa, set );
radeonTexObjPtr texobj )
{
GLuint *cmd = RADEON_DB_STATE( tex[unit] );
+ GLuint se_coord_fmt = rmesa->hw.set.cmd[SET_SE_COORDFMT];
cmd[TEX_PP_TXFILTER] &= ~TEXOBJ_TXFILTER_MASK;
cmd[TEX_PP_TXFILTER] |= texobj->pp_txfilter & TEXOBJ_TXFILTER_MASK;
cmd[TEX_PP_TXOFFSET] = texobj->pp_txoffset;
cmd[TEX_PP_BORDER_COLOR] = texobj->pp_border_color;
- if (texobj->base.tObj->Target == GL_TEXTURE_CUBE_MAP) {
- GLuint *cube_cmd = RADEON_DB_STATE( cube[unit] );
- GLuint bytesPerFace = texobj->base.totalSize / 6;
- ASSERT(texobj->base.totalSize % 6 == 0);
-
- cube_cmd[CUBE_PP_CUBIC_FACES] = texobj->pp_cubic_faces;
- /* dont know if this setup conforms to OpenGL..
- * at least it matches the behavior of mesa software renderer
- */
- cube_cmd[CUBE_PP_CUBIC_OFFSET_0] = texobj->pp_txoffset; /* right */
- cube_cmd[CUBE_PP_CUBIC_OFFSET_1] = texobj->pp_txoffset + 1 * bytesPerFace; /* left */
- cube_cmd[CUBE_PP_CUBIC_OFFSET_2] = texobj->pp_txoffset + 2 * bytesPerFace; /* top */
- cube_cmd[CUBE_PP_CUBIC_OFFSET_3] = texobj->pp_txoffset + 3 * bytesPerFace; /* bottom */
- cube_cmd[CUBE_PP_CUBIC_OFFSET_4] = texobj->pp_txoffset + 4 * bytesPerFace; /* front */
- RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.cube[unit] );
- cmd[TEX_PP_TXOFFSET] = texobj->pp_txoffset + 5 * bytesPerFace; /* back */
- }
- else if (texobj->base.tObj->Target == GL_TEXTURE_RECTANGLE_NV) {
+ if (texobj->base.tObj->Target == GL_TEXTURE_RECTANGLE_NV) {
GLuint *txr_cmd = RADEON_DB_STATE( txr[unit] );
txr_cmd[TXR_PP_TEX_SIZE] = texobj->pp_txsize; /* NPOT only! */
txr_cmd[TXR_PP_TEX_PITCH] = texobj->pp_txpitch; /* NPOT only! */
RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.txr[unit] );
+ se_coord_fmt |= RADEON_VTX_ST0_NONPARAMETRIC << unit;
+ }
+ else {
+ se_coord_fmt &= ~(RADEON_VTX_ST0_NONPARAMETRIC << unit);
+
+ if (texobj->base.tObj->Target == GL_TEXTURE_CUBE_MAP) {
+ GLuint *cube_cmd = RADEON_DB_STATE( cube[unit] );
+ GLuint bytesPerFace = texobj->base.totalSize / 6;
+ ASSERT(texobj->base.totalSize % 6 == 0);
+
+ cube_cmd[CUBE_PP_CUBIC_FACES] = texobj->pp_cubic_faces;
+ /* Not sure whether this setup conforms to OpenGL;
+ * it at least matches the behavior of the Mesa software renderer.
+ */
+ cube_cmd[CUBE_PP_CUBIC_OFFSET_0] = texobj->pp_txoffset; /* right */
+ cube_cmd[CUBE_PP_CUBIC_OFFSET_1] = texobj->pp_txoffset + 1 * bytesPerFace; /* left */
+ cube_cmd[CUBE_PP_CUBIC_OFFSET_2] = texobj->pp_txoffset + 2 * bytesPerFace; /* top */
+ cube_cmd[CUBE_PP_CUBIC_OFFSET_3] = texobj->pp_txoffset + 3 * bytesPerFace; /* bottom */
+ cube_cmd[CUBE_PP_CUBIC_OFFSET_4] = texobj->pp_txoffset + 4 * bytesPerFace; /* front */
+ RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.cube[unit] );
+ cmd[TEX_PP_TXOFFSET] = texobj->pp_txoffset + 5 * bytesPerFace; /* back */
+ }
}
RADEON_DB_STATECHANGE( rmesa, &rmesa->hw.tex[unit] );
+ if (se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT]) {
+ RADEON_STATECHANGE( rmesa, set );
+ rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
+ }
texobj->dirty_state &= ~(1<<unit);
}
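
For context (not part of the patch): GL_TEXTURE_RECTANGLE_NV coordinates are supplied in texel space (0..width, 0..height), and the deleted _radeon_texrect_stage rescaled them into 0..1 by dividing by the base image dimensions; the replacement instead sets RADEON_VTX_ST0_NONPARAMETRIC << unit in SET_SE_COORDFMT so the texel-space values reach the hardware unmodified. A minimal standalone sketch of the old per-vertex scaling, with a hypothetical helper name and arbitrary sample dimensions:

#include <stdio.h>

/* Same scaling the removed stage applied per vertex: s/width, t/height. */
static void normalize_texrect(float (*coords)[2], int count,
                              float width, float height)
{
   for (int i = 0; i < count; i++) {
      coords[i][0] /= width;
      coords[i][1] /= height;
   }
}

int main(void)
{
   /* Texel-space coordinates for a hypothetical 256x128 rectangle texture. */
   float st[4][2] = { {0, 0}, {256, 0}, {256, 128}, {0, 128} };

   normalize_texrect(st, 4, 256.0f, 128.0f);

   for (int i = 0; i < 4; i++)
      printf("(%g, %g)\n", st[i][0], st[i][1]);   /* (0,0) (1,0) (1,1) (0,1) */
   return 0;
}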