}
}
-static GLuint r300CalculateTexLodBias(GLfloat bias)
+static GLuint translate_lod_bias(GLfloat bias)
{
- GLuint b;
- b = (unsigned int)fabsf(ceilf(bias*31));
- if (signbit(bias)) {
- b ^= 0x3ff; /* 10 bits */
- }
- b <<= 3;
- b &= R300_LOD_BIAS_MASK;
- return b;
+ GLint b = (int)(bias*32);
+ if (b >= (1 << 9))
+ b = (1 << 9)-1;
+ else if (b < -(1 << 9))
+ b = -(1 << 9);
+ return (((GLuint)b) << R300_LOD_BIAS_SHIFT) & R300_LOD_BIAS_MASK;
}
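
The new encoding converts the float bias to signed 10-bit fixed point with 5 fractional bits (steps of 1/32), clamping to the representable range [-16.0, +15.96875]. A minimal standalone sketch of the conversion, with the R300_LOD_BIAS_SHIFT and R300_LOD_BIAS_MASK values assumed here for illustration (the real definitions live in r300_reg.h):

	#include <stdio.h>

	#define R300_LOD_BIAS_SHIFT 3          /* assumed: field starts at bit 3 */
	#define R300_LOD_BIAS_MASK  0x00001ff8 /* assumed: 10-bit field at that shift */

	static unsigned translate_lod_bias(float bias)
	{
		int b = (int)(bias * 32);          /* 5 fractional bits */
		if (b >= (1 << 9))
			b = (1 << 9) - 1;          /* clamp to +15.96875 */
		else if (b < -(1 << 9))
			b = -(1 << 9);             /* clamp to -16.0 */
		/* The mask is essential: for negative b, sign extension would
		 * otherwise spill into unrelated bits of the filter_1 register. */
		return (((unsigned)b) << R300_LOD_BIAS_SHIFT) & R300_LOD_BIAS_MASK;
	}

	int main(void)
	{
		const float tests[] = { 0.0f, 1.0f, -1.0f, 20.0f, -20.0f };
		for (int i = 0; i < 5; i++)
			printf("bias %+8.4f -> 0x%08x\n", tests[i],
			       translate_lod_bias(tests[i]));
		return 0;
	}
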
static void r300SetupTextures(GLcontext * ctx)
r300->hw.tex.filter.cmd[R300_TEX_VALUE_0 +
hw_tmu] =
gen_fixed_filter(t->filter) | (hw_tmu << 28);
- /* Make LOD bias a bit more per-tex and less per-everything. */
- t->filter_1 &= ~R300_LOD_BIAS_MASK;
- t->filter_1 |= r300CalculateTexLodBias(ctx->Texture.Unit[i].LodBias);
- r300->hw.tex.filter_1.cmd[R300_TEX_VALUE_0 + hw_tmu] = t->filter_1;
+ /* Note: There is a LOD bias per texture unit and a LOD bias
+ * per texture object. We add them here to get the correct behaviour.
+ * (The per-texture object LOD bias was introduced in OpenGL 1.4
+ * and is not present in the EXT_texture_lod_bias extension).
+ */
+ r300->hw.tex.filter_1.cmd[R300_TEX_VALUE_0 + hw_tmu] =
+ t->filter_1 |
+ translate_lod_bias(ctx->Texture.Unit[i].LodBias + t->base.tObj->LodBias);
r300->hw.tex.size.cmd[R300_TEX_VALUE_0 + hw_tmu] =
t->size;
r300->hw.tex.format.cmd[R300_TEX_VALUE_0 +
t->pp_border_color = PACK_COLOR_8888(c[3], c[0], c[1], c[2]);
}
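
With the per-unit and per-object biases now summed in one place, the separate per-object hook below becomes redundant. For reference, this is how an application drives the two values that enter that sum; a sketch assuming GL 1.4 headers and a current context, with tex a hypothetical texture name:

	#include <GL/gl.h>

	static void set_both_lod_biases(GLuint tex)
	{
		/* Per-texture-unit bias (EXT_texture_lod_bias, core in 1.4). */
		glTexEnvf(GL_TEXTURE_FILTER_CONTROL, GL_TEXTURE_LOD_BIAS, 0.5f);

		/* Per-texture-object bias (introduced in OpenGL 1.4). */
		glBindTexture(GL_TEXTURE_2D, tex);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_LOD_BIAS, -0.25f);

		/* The driver sums the two before encoding: 0.5 + (-0.25) = 0.25. */
	}
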
-static void r300SetTexLodBias(r300TexObjPtr t, GLfloat bias)
-{
- GLuint b;
- b = (unsigned int)fabsf(ceilf(bias*31));
- if (signbit(bias)) {
- b ^= 0x3ff; /* 10 bits */
- }
- b <<= 3;
- b &= R300_LOD_BIAS_MASK;
-
- t->filter_1 &= ~R300_LOD_BIAS_MASK;
- t->filter_1 |= b;
-}
-
/**
* Allocate space for and load the mesa images into the texture memory block.
* This will happen before drawing with a new texture, or drawing with a
t->dirty_images[0] |= (1 << level);
}
-/* This feels like a prime target for code reuse, so I'm putting it here
- * instead of inlining it in TexEnv. */
-static GLenum r300TexUnitTarget(struct gl_texture_unit *unit) {
- if (unit->_ReallyEnabled & (TEXTURE_RECT_BIT)) {
- return GL_TEXTURE_RECTANGLE_NV;
- } else if (unit->_ReallyEnabled & (TEXTURE_1D_BIT)) {
- return GL_TEXTURE_1D;
- } else if (unit->_ReallyEnabled & (TEXTURE_2D_BIT)) {
- return GL_TEXTURE_2D;
- } else if (unit->_ReallyEnabled & (TEXTURE_3D_BIT)) {
- return GL_TEXTURE_3D;
- } else if (unit->_ReallyEnabled & (TEXTURE_CUBE_BIT)) {
- return GL_TEXTURE_CUBE_MAP;
- }
- if (unit->Enabled & (TEXTURE_RECT_BIT)) {
- return GL_TEXTURE_RECTANGLE_NV;
- } else if (unit->Enabled & (TEXTURE_1D_BIT)) {
- return GL_TEXTURE_1D;
- } else if (unit->Enabled & (TEXTURE_2D_BIT)) {
- return GL_TEXTURE_2D;
- } else if (unit->Enabled & (TEXTURE_3D_BIT)) {
- return GL_TEXTURE_3D;
- } else if (unit->Enabled & (TEXTURE_CUBE_BIT)) {
- return GL_TEXTURE_CUBE_MAP;
- }
- return 0;
-}
-
-static void r300TexEnv(GLcontext * ctx, GLenum target,
- GLenum pname, const GLfloat * param)
-{
- r300ContextPtr rmesa = R300_CONTEXT(ctx);
- if (RADEON_DEBUG & DEBUG_STATE) {
- fprintf(stderr, "%s( %s )\n",
- __FUNCTION__, _mesa_lookup_enum_by_nr(pname));
- }
-
- /* This is incorrect: Need to maintain this data for each of
- * GL_TEXTURE_{123}D, GL_TEXTURE_RECTANGLE_NV, etc, and switch
- * between them according to _ReallyEnabled.
- */
- switch (pname) {
- case GL_TEXTURE_LOD_BIAS_EXT: {
- GLfloat bias, min;
-
- /* The R300's LOD bias is a signed 2's complement value with a
- * range of -16.0 <= bias < 16.0.
- *
- * NOTE: Add a small bias to the bias for conform mipsel.c test.
- */
- bias = *param + .01;
- min = driQueryOptionb(&rmesa->radeon.optionCache,
- "no_neg_lod_bias") ? 0.0 : -16.0;
- bias = CLAMP(bias, min, 16.0);
-
- /* There's probably a magic Mesa method for finding the REAL
- * texture unit. I don't know it, though. */
- if (!(ctx->Texture._EnabledUnits & (1 << ctx->Texture.CurrentUnit))) {
- break;
- }
-
- /* Save our newly clamped LOD bias. */
- ctx->Texture.Unit[ctx->Texture.CurrentUnit].LodBias = bias;
-
- break;
- }
-
- default:
- return;
- }
-}
-
/**
* Changes variables and flags for a state update, which will happen at the
* next UpdateTextureState
return NULL;
obj->MaxAnisotropy = rmesa->initialMaxAnisotropy;
- /* Attempt to fill LOD bias, if previously set.
- * Should start at 0.0, which won't affect the HW. */
- obj->LodBias = rmesa->LODBias;
-
r300AllocTexObj(obj);
return obj;
}
functions->DeleteTexture = r300DeleteTexture;
functions->IsTextureResident = driIsTextureResident;
- functions->TexEnv = r300TexEnv;
functions->TexParameter = r300TexParameter;
functions->CompressedTexImage2D = r300CompressedTexImage2D;
DRI_CONF_SECTION_QUALITY
DRI_CONF_TEXTURE_DEPTH(DRI_CONF_TEXTURE_DEPTH_FB)
DRI_CONF_DEF_MAX_ANISOTROPY(1.0, "1.0,2.0,4.0,8.0,16.0")
- DRI_CONF_NO_NEG_LOD_BIAS(false)
- DRI_CONF_FORCE_S3TC_ENABLE(false)
+ DRI_CONF_FORCE_S3TC_ENABLE(false)
DRI_CONF_DISABLE_S3TC(false)
DRI_CONF_COLOR_REDUCTION(DRI_CONF_COLOR_REDUCTION_DITHER)
DRI_CONF_ROUND_MODE(DRI_CONF_ROUND_TRUNC)
DRI_CONF_NO_RAST(false)
DRI_CONF_SECTION_END
DRI_CONF_END;
-static const GLuint __driNConfigOptions = 18;
+static const GLuint __driNConfigOptions = 17;
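
Dropping DRI_CONF_NO_NEG_LOD_BIAS means the option count must shrink with it, since the common xmlconfig code walks the generated XML string using exactly this number. A sketch of the consuming call in the same file as the definitions above, assuming the classic driParseOptionInfo() interface from the DRI common code's xmlconfig.h and the usual __driConfigOptions string name:

	#include "xmlconfig.h"  /* DRI common code: driOptionCache, driParseOptionInfo */

	static void parse_driver_options(driOptionCache *cache)
	{
		/* The count (now 17) must match the DRI_CONF_* entries above,
		 * or the XML parser mis-sizes the option table. */
		driParseOptionInfo(cache, __driConfigOptions, __driNConfigOptions);
	}
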
#ifndef RADEON_DEBUG
int RADEON_DEBUG = 0;
{
int ret;
drm_radeon_getparam_t gp;
-
+
gp.param = param;
gp.value = value;
-
+
ret = drmCommandWriteRead( fd, DRM_RADEON_GETPARAM, &gp, sizeof(gp));
return ret;
}
depth_bits_array[0] = depth_bits;
depth_bits_array[1] = depth_bits;
-
+
/* Just like with the accumulation buffer, always provide some modes
* with a stencil buffer. It will be a sw fallback, but some apps won't
* care about that.
int ret;
ret = radeonGetParam( sPriv->fd, RADEON_PARAM_GART_BUFFER_OFFSET,
&screen->gart_buffer_offset);
-
+
if (ret) {
FREE( screen );
fprintf(stderr, "drm_radeon_getparam_t (RADEON_PARAM_GART_BUFFER_OFFSET): %d\n", ret);
/**
* This is the driver specific part of the createNewScreen entry point.
- *
+ *
* \todo maybe fold this into intelInitDriver
*
* \return the __GLcontextModes supported by this driver