X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fradeon%2Fradeon_texture.c;h=d2b190e42e0ec2e46e50aefd1cbedc2a4525266d;hb=ba03a0b5ba73bc8e79d0ffa6d1da623544716f74;hp=0390d376ba231b2cc22229da846510b3219d7e32;hpb=9c6a9363ef96c00dd0ad63e340b32479e43fea45;p=mesa.git

diff --git a/src/mesa/drivers/dri/radeon/radeon_texture.c b/src/mesa/drivers/dri/radeon/radeon_texture.c
index 0390d376ba2..d2b190e42e0 100644
--- a/src/mesa/drivers/dri/radeon/radeon_texture.c
+++ b/src/mesa/drivers/dri/radeon/radeon_texture.c
@@ -33,12 +33,13 @@
 #include "main/imports.h"
 #include "main/context.h"
 #include "main/convolve.h"
+#include "main/enums.h"
 #include "main/mipmap.h"
 #include "main/texcompress.h"
 #include "main/texstore.h"
 #include "main/teximage.h"
 #include "main/texobj.h"
-#include "main/texgetimage.h"
+#include "drivers/common/meta.h"
 
 #include "xmlpool.h"		/* for symbolic values of enum-type options */
 
@@ -53,6 +54,13 @@ void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
 	assert(rowsize <= dststride);
 	assert(rowsize <= srcstride);
 
+	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+			"%s dst %p, stride %u, src %p, stride %u, "
+			"numrows %u, rowsize %u.\n",
+			__func__, dst, dststride,
+			src, srcstride,
+			numrows, rowsize);
+
 	if (rowsize == srcstride && rowsize == dststride) {
 		memcpy(dst, src, numrows*rowsize);
 	} else {
@@ -102,8 +110,12 @@ static void teximage_set_map_data(radeon_texture_image *image)
 {
 	radeon_mipmap_level *lvl;
 
-	if (!image->mt)
+	if (!image->mt) {
+		radeon_warning("%s(%p) Trying to set map data without miptree.\n",
+				__func__, image);
+
 		return;
+	}
 
 	lvl = &image->mt->levels[image->mtlevel];
 
@@ -117,6 +129,10 @@ static void teximage_set_map_data(radeon_texture_image *image)
  */
 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
 {
+	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+			"%s(img %p), write_enable %s.\n",
+			__func__, image,
+			write_enable ? "true": "false");
"true": "false"); if (image->mt) { assert(!image->base.Data); @@ -128,6 +144,9 @@ void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable) void radeon_teximage_unmap(radeon_texture_image *image) { + radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, + "%s(img %p)\n", + __func__, image); if (image->mt) { assert(image->base.Data); @@ -162,15 +181,31 @@ void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj) radeonTexObj* t = radeon_tex_obj(texObj); int face, level; - if (!radeon_validate_texture_miptree(ctx, texObj)) - return; + radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, + "%s(%p, tex %p)\n", + __func__, ctx, texObj); + + if (!radeon_validate_texture_miptree(ctx, texObj)) { + radeon_error("%s(%p, tex %p) Failed to validate miptree for " + "sw fallback.\n", + __func__, ctx, texObj); + return; + } + + if (t->image_override && t->bo) { + radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, + "%s(%p, tex %p) Work around for missing miptree in r100.\n", + __func__, ctx, texObj); - /* for r100 3D sw fallbacks don't have mt */ - if (t->image_override && t->bo) map_override(ctx, t); + } - if (!t->mt) + /* for r100 3D sw fallbacks don't have mt */ + if (!t->mt) { + radeon_warning("%s(%p, tex %p) No miptree in texture.\n", + __func__, ctx, texObj); return; + } radeon_bo_map(t->mt->bo, GL_FALSE); for(face = 0; face < t->mt->faces; ++face) { @@ -184,6 +219,10 @@ void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj) radeonTexObj* t = radeon_tex_obj(texObj); int face, level; + radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, + "%s(%p, tex %p)\n", + __func__, ctx, texObj); + if (t->image_override && t->bo) unmap_override(ctx, t); /* for r100 3D sw fallbacks don't have mt */ @@ -197,21 +236,6 @@ void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj) radeon_bo_unmap(t->mt->bo); } -GLuint radeon_face_for_target(GLenum target) -{ - switch (target) { - case GL_TEXTURE_CUBE_MAP_POSITIVE_X: - case GL_TEXTURE_CUBE_MAP_NEGATIVE_X: - case GL_TEXTURE_CUBE_MAP_POSITIVE_Y: - case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y: - case GL_TEXTURE_CUBE_MAP_POSITIVE_Z: - case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z: - return (GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X; - default: - return 0; - } -} - /** * Wraps Mesa's implementation to ensure that the base level image is mapped. * @@ -225,6 +249,10 @@ static void radeon_generate_mipmap(GLcontext *ctx, GLenum target, GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1; int i, face; + radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, + "%s(%p, tex %p) Target type %s.\n", + __func__, ctx, texObj, + _mesa_lookup_enum_by_nr(target)); _mesa_generate_mipmap(ctx, target, texObj); @@ -248,12 +276,32 @@ static void radeon_generate_mipmap(GLcontext *ctx, GLenum target, void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj) { - GLuint face = radeon_face_for_target(target); + radeonContextPtr rmesa = RADEON_CONTEXT(ctx); + struct radeon_bo *bo; + GLuint face = _mesa_tex_target_to_face(target); radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]); + bo = !baseimage->mt ? 
+
+	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+		"%s(%p, target %s, tex %p)\n",
+		__func__, ctx, _mesa_lookup_enum_by_nr(target),
+		texObj);
+
+	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
+		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+			"%s(%p, tex %p) Trying to generate mipmap for texture "
+			"in processing by GPU.\n",
+			__func__, ctx, texObj);
+		radeon_firevertices(rmesa);
+	}
 
-	radeon_teximage_map(baseimage, GL_FALSE);
-	radeon_generate_mipmap(ctx, target, texObj);
-	radeon_teximage_unmap(baseimage);
+	if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
+		radeon_teximage_map(baseimage, GL_FALSE);
+		radeon_generate_mipmap(ctx, target, texObj);
+		radeon_teximage_unmap(baseimage);
+	} else {
+		_mesa_meta_GenerateMipmap(ctx, target, texObj);
+	}
 }
 
 
@@ -312,12 +360,14 @@ gl_format radeonChooseTextureFormat(GLcontext * ctx,
 		(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
 	(void)format;
 
-#if 0
-	fprintf(stderr, "InternalFormat=%s(%d) type=%s format=%s\n",
+	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+		"%s InternalFormat=%s(%d) type=%s format=%s\n",
+		__func__,
 		_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
 		_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
-	fprintf(stderr, "do32bpt=%d force16bpt=%d\n", do32bpt, force16bpt);
-#endif
+	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+		"%s do32bpt=%d force16bpt=%d\n",
+		__func__, do32bpt, force16bpt);
 
 	switch (internalFormat) {
 	case 4:
@@ -472,6 +522,19 @@ gl_format radeonChooseTextureFormat(GLcontext * ctx,
 	case GL_RGBA32F_ARB:
 		return MESA_FORMAT_RGBA_FLOAT32;
 
+#ifdef RADEON_R300
+	case GL_DEPTH_COMPONENT:
+	case GL_DEPTH_COMPONENT16:
+		return MESA_FORMAT_Z16;
+	case GL_DEPTH_COMPONENT24:
+	case GL_DEPTH_COMPONENT32:
+	case GL_DEPTH_STENCIL_EXT:
+	case GL_DEPTH24_STENCIL8_EXT:
+		if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
+			return MESA_FORMAT_S8_Z24;
+		else
+			return MESA_FORMAT_Z16;
+#else
 	case GL_DEPTH_COMPONENT:
 	case GL_DEPTH_COMPONENT16:
 	case GL_DEPTH_COMPONENT24:
@@ -479,6 +542,7 @@ gl_format radeonChooseTextureFormat(GLcontext * ctx,
 	case GL_DEPTH_STENCIL_EXT:
 	case GL_DEPTH24_STENCIL8_EXT:
 		return MESA_FORMAT_S8_Z24;
+#endif
 
 	/* EXT_texture_sRGB */
 	case GL_SRGB:
@@ -499,6 +563,15 @@ gl_format radeonChooseTextureFormat(GLcontext * ctx,
 	case GL_COMPRESSED_SLUMINANCE_ALPHA:
 		return MESA_FORMAT_SLA8;
 
+	case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+		return MESA_FORMAT_SRGB_DXT1;
+	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+		return MESA_FORMAT_SRGBA_DXT1;
+	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
+		return MESA_FORMAT_SRGBA_DXT3;
+	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+		return MESA_FORMAT_SRGBA_DXT5;
+
 	default:
 		_mesa_problem(ctx,
 			      "unexpected internalFormat 0x%x in %s",
@@ -515,15 +588,18 @@ static int image_matches_texture_obj(struct gl_texture_object *texObj,
 	struct gl_texture_image *texImage,
 	unsigned level)
 {
-	const struct gl_texture_image *baseImage = texObj->Image[0][level];
+	const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];
+
+	if (!baseImage)
+		return 0;
 
 	if (level < texObj->BaseLevel || level > texObj->MaxLevel)
 		return 0;
 
 	const unsigned levelDiff = level - texObj->BaseLevel;
-	const unsigned refWidth = baseImage->Width >> levelDiff;
-	const unsigned refHeight = baseImage->Height >> levelDiff;
-	const unsigned refDepth = baseImage->Depth >> levelDiff;
+	const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
+	const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
+	const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);
 
 	return (texImage->Width == refWidth &&
 			texImage->Height == refHeight &&
@@ -549,11 +625,10 @@ static void teximage_assign_miptree(radeonContextPtr rmesa,
 	if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
 		radeon_miptree_unreference(&t->mt);
 		radeon_try_alloc_miptree(rmesa, t);
-		if (RADEON_DEBUG & RADEON_TEXTURE) {
-			fprintf(stderr, "%s: texObj %p, texImage %p, face %d, level %d, "
+		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+			"%s: texObj %p, texImage %p, face %d, level %d, "
 				"texObj miptree doesn't match, allocated new miptree %p\n",
 				__FUNCTION__, texObj, texImage, face, level, t->mt);
-		}
 	}
 
 	/* Miptree alocation may have failed,
@@ -562,7 +637,9 @@ static void teximage_assign_miptree(radeonContextPtr rmesa,
 		image->mtface = face;
 		image->mtlevel = level;
 		radeon_miptree_reference(t->mt, &image->mt);
-	}
+	} else
+		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+				"%s Failed to allocate miptree.\n", __func__);
 }
 
 static GLuint * allocate_image_offsets(GLcontext *ctx,
@@ -573,7 +650,7 @@ static GLuint * allocate_image_offsets(GLcontext *ctx,
 	int i;
 	GLuint *offsets;
 
-	offsets = _mesa_malloc(depth * sizeof(GLuint)) ;
+	offsets = malloc(depth * sizeof(GLuint)) ;
 	if (!offsets) {
 		_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
 		return NULL;
@@ -600,18 +677,22 @@ static void radeon_store_teximage(GLcontext* ctx, int dims,
 		struct gl_texture_image *texImage,
 		int compressed)
 {
+	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
 	radeonTexObj *t = radeon_tex_obj(texObj);
 	radeon_texture_image* image = get_radeon_texture_image(texImage);
 
 	GLuint dstRowStride;
 	GLuint *dstImageOffsets;
 
+	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
+		"%s(%p, tex %p, image %p) compressed %d\n",
+		__func__, ctx, texObj, texImage, compressed);
+
 	if (image->mt) {
 		dstRowStride = image->mt->levels[image->mtlevel].rowstride;
 	} else if (t->bo) {
 		/* TFP case */
-		/* TODO */
-		assert(0);
+		dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
 	} else {
 		dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
 	}
@@ -622,6 +703,7 @@ static void radeon_store_teximage(GLcontext* ctx, int dims,
 		unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
 		dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
 		if (!dstImageOffsets) {
+			radeon_warning("%s Failed to allocate dstImaeOffset.\n", __func__);
 			return;
 		}
 	} else {
@@ -667,7 +749,7 @@ static void radeon_store_teximage(GLcontext* ctx, int dims,
 	}
 
 	if (dims == 3) {
-		_mesa_free(dstImageOffsets);
+		free(dstImageOffsets);
 	}
 
 	radeon_teximage_unmap(image);
@@ -693,20 +775,23 @@ static void radeon_teximage(
 	radeon_texture_image* image = get_radeon_texture_image(texImage);
 	GLint postConvWidth = width;
 	GLint postConvHeight = height;
-	GLuint face = radeon_face_for_target(target);
+	GLuint face = _mesa_tex_target_to_face(target);
+
+	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
+			__func__, dims, texObj, texImage, face, level);
 
 	{
 		struct radeon_bo *bo;
 		bo = !image->mt ? image->bo : image->mt->bo;
 		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
+			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+				"%s Calling teximage for texture that is "
+				"queued for GPU processing.\n",
+				__func__);
 			radeon_firevertices(rmesa);
 		}
 	}
 
-	if (RADEON_DEBUG & RADEON_TEXTURE) {
-		fprintf(stderr, "radeon_teximage%dd: texObj %p, texImage %p, face %d, level %d\n",
-			dims, texObj, texImage, face, level);
-	}
 
 	t->validated = GL_FALSE;
 
@@ -738,11 +823,10 @@ static void radeon_teximage(
 			texImage->Height,
 			texImage->Depth);
 		texImage->Data = _mesa_alloc_texmemory(size);
-		if (RADEON_DEBUG & RADEON_TEXTURE) {
-			fprintf(stderr, "radeon_teximage%dd: texObj %p, texImage %p, "
+		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+			"%s %dd: texObj %p, texImage %p, "
 				" no miptree assigned, using local memory %p\n",
-				dims, texObj, texImage, texImage->Data);
-		}
+			__func__, dims, texObj, texImage, texImage->Data);
 	}
 	}
 
@@ -836,18 +920,22 @@ static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int leve
 	radeonTexObj* t = radeon_tex_obj(texObj);
 	radeon_texture_image* image = get_radeon_texture_image(texImage);
 
+	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
+			__func__, dims, texObj, texImage,
+			_mesa_tex_target_to_face(target), level);
 	{
 		struct radeon_bo *bo;
 		bo = !image->mt ? image->bo : image->mt->bo;
 		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
+			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
+				"%s Calling texsubimage for texture that is "
+				"queued for GPU processing.\n",
+				__func__);
 			radeon_firevertices(rmesa);
 		}
 	}
 
-	if (RADEON_DEBUG & RADEON_TEXTURE) {
-		fprintf(stderr, "radeon_texsubimage%dd: texObj %p, texImage %p, face %d, level %d\n",
-			dims, texObj, texImage, radeon_face_for_target(target), level);
-	}
 
 	t->validated = GL_FALSE;
 	if (compressed) {
@@ -924,57 +1012,18 @@ void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
 			  format, type, pixels, packing, texObj, texImage, 0);
 }
 
-/**
- * Need to map texture image into memory before copying image data,
- * then unmap it.
- */ -static void -radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level, - GLenum format, GLenum type, GLvoid * pixels, - struct gl_texture_object *texObj, - struct gl_texture_image *texImage, int compressed) +unsigned radeonIsFormatRenderable(gl_format mesa_format) { - radeon_texture_image *image = get_radeon_texture_image(texImage); + if (mesa_format == _dri_texformat_argb8888 || mesa_format == _dri_texformat_rgb565 || + mesa_format == _dri_texformat_argb1555 || mesa_format == _dri_texformat_argb4444) + return 1; - if (image->mt) { - /* Map the texture image read-only */ - radeon_teximage_map(image, GL_FALSE); - } else { - /* Image hasn't been uploaded to a miptree yet */ - assert(image->base.Data); - } - - if (compressed) { - /* FIXME: this can't work for small textures (mips) which - use different hw stride */ - _mesa_get_compressed_teximage(ctx, target, level, pixels, - texObj, texImage); - } else { - _mesa_get_teximage(ctx, target, level, format, type, pixels, - texObj, texImage); - } - - if (image->mt) { - radeon_teximage_unmap(image); + switch (mesa_format) + { + case MESA_FORMAT_Z16: + case MESA_FORMAT_S8_Z24: + return 1; + default: + return 0; } } - -void -radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level, - GLenum format, GLenum type, GLvoid * pixels, - struct gl_texture_object *texObj, - struct gl_texture_image *texImage) -{ - radeon_get_tex_image(ctx, target, level, format, type, pixels, - texObj, texImage, 0); -} - -void -radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level, - GLvoid *pixels, - struct gl_texture_object *texObj, - struct gl_texture_image *texImage) -{ - radeon_get_tex_image(ctx, target, level, 0, 0, pixels, - texObj, texImage, 1); -}