X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fradeon%2Fradeon_texture.c;h=2b655fbd953f8ebcc73c57d0fe18b7845f48fa36;hb=f0f04cd12db156ec53b7ea46fae27199af121f90;hp=c715650d55f6e4cad1cd352100d6968b31a5d23e;hpb=e8f0c8ab9d3509dc399ea58c320056ed90895792;p=mesa.git

diff --git a/src/mesa/drivers/dri/radeon/radeon_texture.c b/src/mesa/drivers/dri/radeon/radeon_texture.c
index c715650d55f..2b655fbd953 100644
--- a/src/mesa/drivers/dri/radeon/radeon_texture.c
+++ b/src/mesa/drivers/dri/radeon/radeon_texture.c
@@ -33,12 +33,12 @@
 #include "main/imports.h"
 #include "main/context.h"
 #include "main/convolve.h"
+#include "main/enums.h"
 #include "main/mipmap.h"
 #include "main/texcompress.h"
 #include "main/texstore.h"
 #include "main/teximage.h"
 #include "main/texobj.h"
-#include "main/texgetimage.h"
 
 #include "xmlpool.h"	/* for symbolic values of enum-type options */
 
@@ -53,6 +53,13 @@ void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
 	assert(rowsize <= dststride);
 	assert(rowsize <= srcstride);
 
+	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+			"%s dst %p, stride %u, src %p, stride %u, "
+			"numrows %u, rowsize %u.\n",
+			__func__, dst, dststride,
+			src, srcstride,
+			numrows, rowsize);
+
 	if (rowsize == srcstride && rowsize == dststride) {
 		memcpy(dst, src, numrows*rowsize);
 	} else {
@@ -102,8 +109,12 @@ static void teximage_set_map_data(radeon_texture_image *image)
 {
 	radeon_mipmap_level *lvl;
 
-	if (!image->mt)
+	if (!image->mt) {
+		radeon_warning("%s(%p) Trying to set map data without miptree.\n",
+				__func__, image);
+
 		return;
+	}
 
 	lvl = &image->mt->levels[image->mtlevel];
 
@@ -117,6 +128,10 @@ static void teximage_set_map_data(radeon_texture_image *image)
  */
 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
 {
+	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+			"%s(img %p), write_enable %s.\n",
+			__func__, image,
+			write_enable ? "true": "false");
 	if (image->mt) {
 		assert(!image->base.Data);
 
@@ -128,6 +143,9 @@ void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
 
 void radeon_teximage_unmap(radeon_texture_image *image)
 {
+	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+			"%s(img %p)\n",
+			__func__, image);
 	if (image->mt) {
 		assert(image->base.Data);
 
@@ -162,15 +180,31 @@ void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
 	radeonTexObj* t = radeon_tex_obj(texObj);
 	int face, level;
 
-	if (!radeon_validate_texture_miptree(ctx, texObj))
-		return;
+	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+			"%s(%p, tex %p)\n",
+			__func__, ctx, texObj);
+
+	if (!radeon_validate_texture_miptree(ctx, texObj)) {
+		radeon_error("%s(%p, tex %p) Failed to validate miptree for "
+			"sw fallback.\n",
+			__func__, ctx, texObj);
+		return;
+	}
+
+	if (t->image_override && t->bo) {
+		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+			"%s(%p, tex %p) Work around for missing miptree in r100.\n",
+			__func__, ctx, texObj);
 
-	/* for r100 3D sw fallbacks don't have mt */
-	if (t->image_override && t->bo)
 		map_override(ctx, t);
+	}
 
-	if (!t->mt)
+	/* for r100 3D sw fallbacks don't have mt */
+	if (!t->mt) {
+		radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
+			__func__, ctx, texObj);
 		return;
+	}
 
 	radeon_bo_map(t->mt->bo, GL_FALSE);
 
 	for(face = 0; face < t->mt->faces; ++face) {
@@ -184,6 +218,10 @@ void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
 	radeonTexObj* t = radeon_tex_obj(texObj);
 	int face, level;
 
+	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+			"%s(%p, tex %p)\n",
+			__func__, ctx, texObj);
+
 	if (t->image_override && t->bo)
 		unmap_override(ctx, t);
 	/* for r100 3D sw fallbacks don't have mt */
@@ -197,21 +235,6 @@ void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
 	radeon_bo_unmap(t->mt->bo);
 }
 
-GLuint radeon_face_for_target(GLenum target)
-{
-	switch (target) {
-	case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
-	case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
-	case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
-	case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
-	case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
-	case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
-		return (GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
-	default:
-		return 0;
-	}
-}
-
 /**
  * Wraps Mesa's implementation to ensure that the base level image is mapped.
  *
@@ -225,6 +248,10 @@ static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
 	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
 	int i, face;
 
+	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+			"%s(%p, tex %p) Target type %s.\n",
+			__func__, ctx, texObj,
+			_mesa_lookup_enum_by_nr(target));
 
 	_mesa_generate_mipmap(ctx, target, texObj);
 
@@ -248,8 +275,24 @@ void radeonGenerateMipmap(GLcontext* ctx, GLenum target,
 	struct gl_texture_object *texObj)
 {
-	GLuint face = radeon_face_for_target(target);
+	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+	struct radeon_bo *bo;
+	GLuint face = _mesa_tex_target_to_face(target);
 	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
+	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;
+
+	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+		"%s(%p, target %s, tex %p)\n",
+		__func__, ctx, _mesa_lookup_enum_by_nr(target),
+		texObj);
+
+	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
+		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+			"%s(%p, tex %p) Trying to generate mipmap for texture "
+			"in processing by GPU.\n",
+			__func__, ctx, texObj);
+		radeon_firevertices(rmesa);
+	}
 
 	radeon_teximage_map(baseimage, GL_FALSE);
 	radeon_generate_mipmap(ctx, target, texObj);
@@ -312,12 +355,14 @@ gl_format radeonChooseTextureFormat(GLcontext * ctx,
 		(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
 	(void)format;
 
-#if 0
-	fprintf(stderr, "InternalFormat=%s(%d) type=%s format=%s\n",
+	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+		"%s InternalFormat=%s(%d) type=%s format=%s\n",
+		__func__,
 		_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
 		_mesa_lookup_enum_by_nr(type),
 		_mesa_lookup_enum_by_nr(format));
-	fprintf(stderr, "do32bpt=%d force16bpt=%d\n", do32bpt, force16bpt);
-#endif
+	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+		"%s do32bpt=%d force16bpt=%d\n",
+		__func__, do32bpt, force16bpt);
 
 	switch (internalFormat) {
 	case 4:
@@ -472,6 +517,19 @@ gl_format radeonChooseTextureFormat(GLcontext * ctx,
 	case GL_RGBA32F_ARB:
 		return MESA_FORMAT_RGBA_FLOAT32;
 
+#ifdef RADEON_R300
+	case GL_DEPTH_COMPONENT:
+	case GL_DEPTH_COMPONENT16:
+		return MESA_FORMAT_Z16;
+	case GL_DEPTH_COMPONENT24:
+	case GL_DEPTH_COMPONENT32:
+	case GL_DEPTH_STENCIL_EXT:
+	case GL_DEPTH24_STENCIL8_EXT:
+		if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
+			return MESA_FORMAT_S8_Z24;
+		else
+			return MESA_FORMAT_Z16;
+#else
 	case GL_DEPTH_COMPONENT:
 	case GL_DEPTH_COMPONENT16:
 	case GL_DEPTH_COMPONENT24:
@@ -479,6 +537,7 @@ gl_format radeonChooseTextureFormat(GLcontext * ctx,
 	case GL_DEPTH_STENCIL_EXT:
 	case GL_DEPTH24_STENCIL8_EXT:
 		return MESA_FORMAT_S8_Z24;
+#endif
 
 	/* EXT_texture_sRGB */
 	case GL_SRGB:
@@ -499,6 +558,15 @@ gl_format radeonChooseTextureFormat(GLcontext * ctx,
 	case GL_COMPRESSED_SLUMINANCE_ALPHA:
 		return MESA_FORMAT_SLA8;
 
+	case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+		return MESA_FORMAT_SRGB_DXT1;
+	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+		return MESA_FORMAT_SRGBA_DXT1;
+	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
+		return MESA_FORMAT_SRGBA_DXT3;
+	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+		return MESA_FORMAT_SRGBA_DXT5;
+
 	default:
 		_mesa_problem(ctx,
 			      "unexpected internalFormat 0x%x in %s",
@@ -509,6 +577,30 @@ gl_format radeonChooseTextureFormat(GLcontext * ctx,
 	return MESA_FORMAT_NONE;		/* never get here */
 }
 
+/** Check if given image is valid within current texture object.
+ */
+static int image_matches_texture_obj(struct gl_texture_object *texObj,
+	struct gl_texture_image *texImage,
+	unsigned level)
+{
+	const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];
+
+	if (!baseImage)
+		return 0;
+
+	if (level < texObj->BaseLevel || level > texObj->MaxLevel)
+		return 0;
+
+	const unsigned levelDiff = level - texObj->BaseLevel;
+	const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
+	const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
+	const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);
+
+	return (texImage->Width == refWidth &&
+			texImage->Height == refHeight &&
+			texImage->Depth == refDepth);
+}
+
 static void teximage_assign_miptree(radeonContextPtr rmesa,
 		struct gl_texture_object *texObj,
 		struct gl_texture_image *texImage,
@@ -518,25 +610,31 @@ static void teximage_assign_miptree(radeonContextPtr rmesa,
 	radeonTexObj *t = radeon_tex_obj(texObj);
 	radeon_texture_image* image = get_radeon_texture_image(texImage);
 
+	/* Since miptree holds only images for levels
+	 * don't allocate the miptree if the teximage won't fit.
+	 */
+	if (!image_matches_texture_obj(texObj, texImage, level))
+		return;
+
 	/* Try using current miptree, or create new if there isn't any */
-	if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face,
-						radeon_gl_level_to_miptree_level(texObj, level))) {
+	if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
 		radeon_miptree_unreference(&t->mt);
 		radeon_try_alloc_miptree(rmesa, t);
-		if (RADEON_DEBUG & RADEON_TEXTURE) {
-			fprintf(stderr, "%s: texObj %p, texImage %p, face %d, level %d, "
+		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+				"%s: texObj %p, texImage %p, face %d, level %d, "
 				"texObj miptree doesn't match, allocated new miptree %p\n",
 				__FUNCTION__, texObj, texImage, face, level, t->mt);
-		}
 	}
 
 	/* Miptree alocation may have failed,
 	 * when there was no image for baselevel specified */
 	if (t->mt) {
 		image->mtface = face;
-		image->mtlevel = radeon_gl_level_to_miptree_level(texObj, level);
+		image->mtlevel = level;
 		radeon_miptree_reference(t->mt, &image->mt);
-	}
+	} else
+		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+				"%s Failed to allocate miptree.\n", __func__);
 }
 
 static GLuint * allocate_image_offsets(GLcontext *ctx,
@@ -547,7 +645,7 @@ static GLuint * allocate_image_offsets(GLcontext *ctx,
 	int i;
 	GLuint *offsets;
 
-	offsets = _mesa_malloc(depth * sizeof(GLuint)) ;
+	offsets = malloc(depth * sizeof(GLuint)) ;
 	if (!offsets) {
 		_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
 		return NULL;
@@ -574,26 +672,33 @@ static void radeon_store_teximage(GLcontext* ctx, int dims,
 		struct gl_texture_image *texImage, int compressed)
 {
+	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
 	radeonTexObj *t = radeon_tex_obj(texObj);
 	radeon_texture_image* image = get_radeon_texture_image(texImage);
 	GLuint dstRowStride;
 	GLuint *dstImageOffsets;
 
+	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+			"%s(%p, tex %p, image %p) compressed %d\n",
+			__func__, ctx, texObj, texImage, compressed);
+
 	if (image->mt) {
 		dstRowStride = image->mt->levels[image->mtlevel].rowstride;
 	} else if (t->bo) {
 		/* TFP case */
-		/* TODO */
-		assert(0);
+		dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
 	} else {
 		dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
 	}
 
+	assert(dstRowStride);
+
 	if (dims == 3) {
 		unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
 		dstImageOffsets = allocate_image_offsets(ctx, alignedWidth,
 					texImage->Height, texImage->Depth);
 		if (!dstImageOffsets) {
+			radeon_warning("%s Failed to allocate dstImaeOffset.\n", __func__);
 			return;
 		}
 	} else {
@@ -639,7 +744,7 @@ static void radeon_store_teximage(GLcontext* ctx, int dims,
 	}
 
 	if (dims == 3) {
-		_mesa_free(dstImageOffsets);
+		free(dstImageOffsets);
 	}
 
 	radeon_teximage_unmap(image);
@@ -665,20 +770,23 @@ static void radeon_teximage(
 	radeon_texture_image* image = get_radeon_texture_image(texImage);
 	GLint postConvWidth = width;
 	GLint postConvHeight = height;
-	GLuint face = radeon_face_for_target(target);
+	GLuint face = _mesa_tex_target_to_face(target);
+	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
+			__func__, dims, texObj, texImage, face, level);
 
 	{
 		struct radeon_bo *bo;
 		bo = !image->mt ? image->bo : image->mt->bo;
 		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
+			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+				"%s Calling teximage for texture that is "
+				"queued for GPU processing.\n",
+				__func__);
 			radeon_firevertices(rmesa);
 		}
 	}
 
-	if (RADEON_DEBUG & RADEON_TEXTURE) {
-		fprintf(stderr, "radeon_teximage%dd: texObj %p, texImage %p, face %d, level %d\n",
-			dims, texObj, texImage, face, level);
-	}
 
 	t->validated = GL_FALSE;
 
@@ -704,17 +812,16 @@ static void radeon_teximage(
 
 	if (!t->bo) {
 		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
-		if (!t->mt) {
+		if (!image->mt) {
 			int size = _mesa_format_image_size(texImage->TexFormat,
 						texImage->Width,
 						texImage->Height,
 						texImage->Depth);
 			texImage->Data = _mesa_alloc_texmemory(size);
-			if (RADEON_DEBUG & RADEON_TEXTURE) {
-				fprintf(stderr, "radeon_teximage%dd: texObj %p, texImage %p, "
+			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+					"%s %dd: texObj %p, texImage %p, "
 					" no miptree assigned, using local memory %p\n",
-					dims, texObj, texImage, texImage->Data);
-			}
+					__func__, dims, texObj, texImage, texImage->Data);
 		}
 	}
 
@@ -808,18 +915,22 @@ static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int leve
 	radeonTexObj* t = radeon_tex_obj(texObj);
 	radeon_texture_image* image = get_radeon_texture_image(texImage);
 
+	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
+			__func__, dims, texObj, texImage,
+			_mesa_tex_target_to_face(target), level);
 	{
 		struct radeon_bo *bo;
 		bo = !image->mt ? image->bo : image->mt->bo;
 		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
+			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+				"%s Calling texsubimage for texture that is "
+				"queued for GPU processing.\n",
+				__func__);
 			radeon_firevertices(rmesa);
 		}
 	}
 
-	if (RADEON_DEBUG & RADEON_TEXTURE) {
-		fprintf(stderr, "radeon_texsubimage%dd: texObj %p, texImage %p, face %d, level %d\n",
-			dims, texObj, texImage, radeon_face_for_target(target), level);
-	}
 
 	t->validated = GL_FALSE;
 	if (compressed) {
@@ -896,57 +1007,18 @@ void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
 			   format, type, pixels, packing, texObj, texImage, 0);
 }
 
-/**
- * Need to map texture image into memory before copying image data,
- * then unmap it.
- */
-static void
-radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
-		     GLenum format, GLenum type, GLvoid * pixels,
-		     struct gl_texture_object *texObj,
-		     struct gl_texture_image *texImage, int compressed)
+unsigned radeonIsFormatRenderable(gl_format mesa_format)
 {
-	radeon_texture_image *image = get_radeon_texture_image(texImage);
+	if (mesa_format == _dri_texformat_argb8888 || mesa_format == _dri_texformat_rgb565 ||
+		mesa_format == _dri_texformat_argb1555 || mesa_format == _dri_texformat_argb4444)
+		return 1;
 
-	if (image->mt) {
-		/* Map the texture image read-only */
-		radeon_teximage_map(image, GL_FALSE);
-	} else {
-		/* Image hasn't been uploaded to a miptree yet */
-		assert(image->base.Data);
-	}
-
-	if (compressed) {
-		/* FIXME: this can't work for small textures (mips) which
-		   use different hw stride */
-		_mesa_get_compressed_teximage(ctx, target, level, pixels,
-					      texObj, texImage);
-	} else {
-		_mesa_get_teximage(ctx, target, level, format, type, pixels,
-				   texObj, texImage);
-	}
-
-	if (image->mt) {
-		radeon_teximage_unmap(image);
+	switch (mesa_format)
+	{
+		case MESA_FORMAT_Z16:
+		case MESA_FORMAT_S8_Z24:
+			return 1;
+		default:
+			return 0;
 	}
 }
-
-void
-radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level,
-		  GLenum format, GLenum type, GLvoid * pixels,
-		  struct gl_texture_object *texObj,
-		  struct gl_texture_image *texImage)
-{
-	radeon_get_tex_image(ctx, target, level, format, type, pixels,
-			     texObj, texImage, 0);
-}
-
-void
-radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
-			    GLvoid *pixels,
-			    struct gl_texture_object *texObj,
-			    struct gl_texture_image *texImage)
-{
-	radeon_get_tex_image(ctx, target, level, 0, 0, pixels,
-			     texObj, texImage, 1);
-}
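Illustration only, not part of the patch above: the new image_matches_texture_obj() helper accepts a gl_texture_image for a given level only when each of its dimensions equals the base image's dimension halved once per level below BaseLevel and clamped to 1 (the usual GL mipmap chain rule); teximage_assign_miptree() now skips miptree allocation when that check fails. A minimal standalone sketch of that size rule, with hypothetical names:

#include <stdio.h>

#define MAX2(a, b) ((a) > (b) ? (a) : (b))

/* Expected size of one dimension `level_diff` levels below the base image. */
static unsigned mip_dim(unsigned base_dim, unsigned level_diff)
{
	return MAX2(base_dim >> level_diff, 1);
}

int main(void)
{
	/* Example: a 64x16 base image at BaseLevel 0. */
	unsigned level;

	for (level = 0; level <= 6; ++level)
		printf("level %u: %ux%u\n", level,
		       mip_dim(64, level), mip_dim(16, level));

	/* Prints 64x16, 32x8, 16x4, 8x2, 4x1, 2x1, 1x1; an image whose size
	 * does not match the row for its level would fail the check and get
	 * no miptree assigned. */
	return 0;
}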
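A second illustration, also not part of the patch: several hunks above repeat the same guard before the CPU touches texture memory -- if the texture's buffer object is still referenced by the command stream being built, pending commands are flushed first with radeon_firevertices(). Factored out, using only identifiers that appear in the diff, the pattern looks roughly like the fragment below (a driver-internal sketch, not compilable on its own):

/* Hypothetical helper mirroring the checks added in radeonGenerateMipmap(),
 * radeon_teximage() and radeon_texsubimage(): flush the command stream if
 * `bo` is still queued, so a following CPU map/write does not race the GPU. */
static void flush_if_bo_queued(radeonContextPtr rmesa, struct radeon_bo *bo)
{
	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs))
		radeon_firevertices(rmesa);
}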