X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fradeon%2Fradeon_mipmap_tree.c;h=7f5fb99fa4fa5a1cdfbfe701cb25b3647c4ba6f7;hb=4fb2daf42c8171579cdc18605c5ceeb1963f8b31;hp=dadc72f4c1b142ade5cd0a834563f192adf7048a;hpb=4e4c2ee1fd574d1d651c559f46afb6ca5487156d;p=mesa.git diff --git a/src/mesa/drivers/dri/radeon/radeon_mipmap_tree.c b/src/mesa/drivers/dri/radeon/radeon_mipmap_tree.c index dadc72f4c1b..7f5fb99fa4f 100644 --- a/src/mesa/drivers/dri/radeon/radeon_mipmap_tree.c +++ b/src/mesa/drivers/dri/radeon/radeon_mipmap_tree.c @@ -1,4 +1,5 @@ /* + * Copyright (C) 2009 Maciej Cencora. * Copyright (C) 2008 Nicolai Haehnle. * * All Rights Reserved. @@ -31,51 +32,89 @@ #include #include "main/simple_list.h" -#include "main/texcompress.h" +#include "main/teximage.h" +#include "main/texobj.h" +#include "main/enums.h" +#include "radeon_texture.h" +#include "radeon_tile.h" + +static unsigned get_aligned_compressed_row_stride( + gl_format format, + unsigned width, + unsigned minStride) +{ + const unsigned blockBytes = _mesa_get_format_bytes(format); + unsigned blockWidth, blockHeight; + unsigned stride; + + _mesa_get_format_block_size(format, &blockWidth, &blockHeight); + + /* Count number of blocks required to store the given width. + * And then multiple it with bytes required to store a block. + */ + stride = (width + blockWidth - 1) / blockWidth * blockBytes; + + /* Round the given minimum stride to the next full blocksize. + * (minStride + blockBytes - 1) / blockBytes * blockBytes + */ + if ( stride < minStride ) + stride = (minStride + blockBytes - 1) / blockBytes * blockBytes; + + radeon_print(RADEON_TEXTURE, RADEON_TRACE, + "%s width %u, minStride %u, block(bytes %u, width %u):" + "stride %u\n", + __func__, width, minStride, + blockBytes, blockWidth, + stride); + + return stride; +} -static GLuint radeon_compressed_texture_size(GLcontext *ctx, - GLsizei width, GLsizei height, GLsizei depth, - GLuint mesaFormat) +unsigned get_texture_image_size( + gl_format format, + unsigned rowStride, + unsigned height, + unsigned depth, + unsigned tiling) { - GLuint size = _mesa_format_image_size(mesaFormat, width, height, depth); - - if (mesaFormat == MESA_FORMAT_RGB_DXT1 || - mesaFormat == MESA_FORMAT_RGBA_DXT1) { - if (width + 3 < 8) /* width one block */ - size = size * 4; - else if (width + 3 < 16) - size = size * 2; - } else { - /* DXT3/5, 16 bytes per block */ - // WARN_ONCE("DXT 3/5 suffers from multitexturing problems!\n"); - if (width + 3 < 8) - size = size * 2; + if (_mesa_is_format_compressed(format)) { + unsigned blockWidth, blockHeight; + + _mesa_get_format_block_size(format, &blockWidth, &blockHeight); + + return rowStride * ((height + blockHeight - 1) / blockHeight) * depth; + } else if (tiling) { + /* Need to align height to tile height */ + unsigned tileWidth, tileHeight; + + get_tile_size(format, &tileWidth, &tileHeight); + tileHeight--; + + height = (height + tileHeight) & ~tileHeight; } - return size; + return rowStride * height * depth; } - -static int radeon_compressed_num_bytes(GLuint mesaFormat) +unsigned get_texture_image_row_stride(radeonContextPtr rmesa, gl_format format, unsigned width, unsigned tiling) { - int bytes = 0; - switch(mesaFormat) { - - case MESA_FORMAT_RGB_FXT1: - case MESA_FORMAT_RGBA_FXT1: - case MESA_FORMAT_RGB_DXT1: - case MESA_FORMAT_RGBA_DXT1: - bytes = 2; - break; - - case MESA_FORMAT_RGBA_DXT3: - case MESA_FORMAT_RGBA_DXT5: - bytes = 4; - default: - break; - } - - return bytes; + if (_mesa_is_format_compressed(format)) { + 
return get_aligned_compressed_row_stride(format, width, rmesa->texture_compressed_row_align); + } else { + unsigned row_align; + + if (!_mesa_is_pow_two(width)) { + row_align = rmesa->texture_rect_row_align - 1; + } else if (tiling) { + unsigned tileWidth, tileHeight; + get_tile_size(format, &tileWidth, &tileHeight); + row_align = tileWidth * _mesa_get_format_bytes(format) - 1; + } else { + row_align = rmesa->texture_row_align - 1; + } + + return (_mesa_format_row_stride(format, width) + row_align) & ~row_align; + } } /** @@ -89,41 +128,23 @@ static void compute_tex_image_offset(radeonContextPtr rmesa, radeon_mipmap_tree GLuint face, GLuint level, GLuint* curOffset) { radeon_mipmap_level *lvl = &mt->levels[level]; - uint32_t row_align; - - /* Find image size in bytes */ - if (mt->compressed) { - /* TODO: Is this correct? Need test cases for compressed textures! */ - row_align = rmesa->texture_compressed_row_align - 1; - lvl->rowstride = (lvl->width * mt->bpp + row_align) & ~row_align; - lvl->size = radeon_compressed_texture_size(mt->radeon->glCtx, - lvl->width, lvl->height, lvl->depth, mt->compressed); - } else if (mt->target == GL_TEXTURE_RECTANGLE_NV) { - row_align = rmesa->texture_rect_row_align - 1; - lvl->rowstride = (lvl->width * mt->bpp + row_align) & ~row_align; - lvl->size = lvl->rowstride * lvl->height; - } else if (mt->tilebits & RADEON_TXO_MICRO_TILE) { - /* tile pattern is 16 bytes x2. mipmaps stay 32 byte aligned, - * though the actual offset may be different (if texture is less than - * 32 bytes width) to the untiled case */ - lvl->rowstride = (lvl->width * mt->bpp * 2 + 31) & ~31; - lvl->size = lvl->rowstride * ((lvl->height + 1) / 2) * lvl->depth; - } else { - row_align = rmesa->texture_row_align - 1; - lvl->rowstride = (lvl->width * mt->bpp + row_align) & ~row_align; - lvl->size = lvl->rowstride * lvl->height * lvl->depth; - } + GLuint height; + + height = _mesa_next_pow_two_32(lvl->height); + + lvl->rowstride = get_texture_image_row_stride(rmesa, mt->mesaFormat, lvl->width, mt->tilebits); + lvl->size = get_texture_image_size(mt->mesaFormat, lvl->rowstride, lvl->height, lvl->depth, mt->tilebits); + assert(lvl->size > 0); - /* All images are aligned to a 32-byte offset */ - *curOffset = (*curOffset + 0x1f) & ~0x1f; lvl->faces[face].offset = *curOffset; *curOffset += lvl->size; - if (RADEON_DEBUG & RADEON_TEXTURE) - fprintf(stderr, - "level %d, face %d: rs:%d %dx%d at %d\n", - level, face, lvl->rowstride, lvl->width, lvl->height, lvl->faces[face].offset); + radeon_print(RADEON_TEXTURE, RADEON_TRACE, + "%s(%p) level %d, face %d: rs:%d %dx%d at %d\n", + __func__, rmesa, + level, face, + lvl->rowstride, lvl->width, height, lvl->faces[face].offset); } static GLuint minify(GLuint size, GLuint levels) @@ -137,78 +158,86 @@ static GLuint minify(GLuint size, GLuint levels) static void calculate_miptree_layout_r100(radeonContextPtr rmesa, radeon_mipmap_tree *mt) { - GLuint curOffset; - GLuint numLevels; - GLuint i; - GLuint face; + GLuint curOffset, i, face, level; - numLevels = mt->lastLevel - mt->firstLevel + 1; - assert(numLevels <= rmesa->glCtx->Const.MaxTextureLevels); + assert(mt->numLevels <= rmesa->glCtx->Const.MaxTextureLevels); curOffset = 0; for(face = 0; face < mt->faces; face++) { - for(i = 0; i < numLevels; i++) { - mt->levels[i].width = minify(mt->width0, i); - mt->levels[i].height = minify(mt->height0, i); - mt->levels[i].depth = minify(mt->depth0, i); - compute_tex_image_offset(rmesa, mt, face, i, &curOffset); + for(i = 0, level = mt->baseLevel; i < mt->numLevels; 
i++, level++) { + mt->levels[level].valid = 1; + mt->levels[level].width = minify(mt->width0, i); + mt->levels[level].height = minify(mt->height0, i); + mt->levels[level].depth = minify(mt->depth0, i); + compute_tex_image_offset(rmesa, mt, face, level, &curOffset); } } /* Note the required size in memory */ mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK; + + radeon_print(RADEON_TEXTURE, RADEON_TRACE, + "%s(%p, %p) total size %d\n", + __func__, rmesa, mt, mt->totalsize); } static void calculate_miptree_layout_r300(radeonContextPtr rmesa, radeon_mipmap_tree *mt) { - GLuint curOffset; - GLuint numLevels; - GLuint i; + GLuint curOffset, i, level; - numLevels = mt->lastLevel - mt->firstLevel + 1; - assert(numLevels <= rmesa->glCtx->Const.MaxTextureLevels); + assert(mt->numLevels <= rmesa->glCtx->Const.MaxTextureLevels); curOffset = 0; - for(i = 0; i < numLevels; i++) { + for(i = 0, level = mt->baseLevel; i < mt->numLevels; i++, level++) { GLuint face; - mt->levels[i].width = minify(mt->width0, i); - mt->levels[i].height = minify(mt->height0, i); - mt->levels[i].depth = minify(mt->depth0, i); + mt->levels[level].valid = 1; + mt->levels[level].width = minify(mt->width0, i); + mt->levels[level].height = minify(mt->height0, i); + mt->levels[level].depth = minify(mt->depth0, i); for(face = 0; face < mt->faces; face++) - compute_tex_image_offset(rmesa, mt, face, i, &curOffset); + compute_tex_image_offset(rmesa, mt, face, level, &curOffset); + /* r600 cube levels seems to be aligned to 8 faces but + * we have separate register for 1'st level offset so add + * 2 image alignment after 1'st mip level */ + if(rmesa->radeonScreen->chip_family >= CHIP_FAMILY_R600 && + mt->target == GL_TEXTURE_CUBE_MAP && level >= 1) + curOffset += 2 * mt->levels[level].size; } /* Note the required size in memory */ mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK; + + radeon_print(RADEON_TEXTURE, RADEON_TRACE, + "%s(%p, %p) total size %d\n", + __func__, rmesa, mt, mt->totalsize); } /** * Create a new mipmap tree, calculate its layout and allocate memory. */ -radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa, radeonTexObj *t, - GLenum target, GLenum internal_format, GLuint firstLevel, GLuint lastLevel, - GLuint width0, GLuint height0, GLuint depth0, - GLuint bpp, GLuint tilebits, GLuint compressed) +static radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa, + GLenum target, gl_format mesaFormat, GLuint baseLevel, GLuint numLevels, + GLuint width0, GLuint height0, GLuint depth0, GLuint tilebits) { radeon_mipmap_tree *mt = CALLOC_STRUCT(_radeon_mipmap_tree); - mt->radeon = rmesa; - mt->internal_format = internal_format; + radeon_print(RADEON_TEXTURE, RADEON_NORMAL, + "%s(%p) new tree is %p.\n", + __func__, rmesa, mt); + + mt->mesaFormat = mesaFormat; mt->refcount = 1; - mt->t = t; mt->target = target; mt->faces = (target == GL_TEXTURE_CUBE_MAP) ? 6 : 1; - mt->firstLevel = firstLevel; - mt->lastLevel = lastLevel; + mt->baseLevel = baseLevel; + mt->numLevels = numLevels; mt->width0 = width0; mt->height0 = height0; mt->depth0 = depth0; - mt->bpp = compressed ? 
radeon_compressed_num_bytes(compressed) : bpp; mt->tilebits = tilebits; - mt->compressed = compressed; if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_R300) calculate_miptree_layout_r300(rmesa, mt); @@ -223,53 +252,43 @@ radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa, radeonTexObj * return mt; } -void radeon_miptree_reference(radeon_mipmap_tree *mt) +void radeon_miptree_reference(radeon_mipmap_tree *mt, radeon_mipmap_tree **ptr) { + assert(!*ptr); + mt->refcount++; assert(mt->refcount > 0); + + *ptr = mt; } -void radeon_miptree_unreference(radeon_mipmap_tree *mt) +void radeon_miptree_unreference(radeon_mipmap_tree **ptr) { + radeon_mipmap_tree *mt = *ptr; if (!mt) return; assert(mt->refcount > 0); + mt->refcount--; if (!mt->refcount) { radeon_bo_unref(mt->bo); free(mt); } -} + *ptr = 0; +} /** - * Calculate first and last mip levels for the given texture object, - * where the dimensions are taken from the given texture image at - * the given level. - * - * Note: level is the OpenGL level number, which is not necessarily the same - * as the first level that is actually present. - * - * The base level image of the given texture face must be non-null, - * or this will fail. + * Calculate min and max LOD for the given texture object. + * @param[in] tObj texture object whose LOD values to calculate + * @param[out] pminLod minimal LOD + * @param[out] pmaxLod maximal LOD */ -static void calculate_first_last_level(struct gl_texture_object *tObj, - GLuint *pfirstLevel, GLuint *plastLevel, - GLuint face, GLuint level) +static void calculate_min_max_lod(struct gl_texture_object *tObj, + unsigned *pminLod, unsigned *pmaxLod) { - const struct gl_texture_image * const baseImage = - tObj->Image[face][level]; - - assert(baseImage); - - /* These must be signed values. MinLod and MaxLod can be negative numbers, - * and having firstLevel and lastLevel as signed prevents the need for - * extra sign checks. - */ - int firstLevel; - int lastLevel; - + int minLod, maxLod; /* Yes, this looks overly complicated, but it's all needed. */ switch (tObj->Target) { @@ -280,32 +299,36 @@ static void calculate_first_last_level(struct gl_texture_object *tObj, if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) { /* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL. 
*/ - firstLevel = lastLevel = tObj->BaseLevel; + minLod = maxLod = tObj->BaseLevel; } else { - firstLevel = tObj->BaseLevel + (GLint)(tObj->MinLod + 0.5); - firstLevel = MAX2(firstLevel, tObj->BaseLevel); - firstLevel = MIN2(firstLevel, level + baseImage->MaxLog2); - lastLevel = tObj->BaseLevel + (GLint)(tObj->MaxLod + 0.5); - lastLevel = MAX2(lastLevel, tObj->BaseLevel); - lastLevel = MIN2(lastLevel, level + baseImage->MaxLog2); - lastLevel = MIN2(lastLevel, tObj->MaxLevel); - lastLevel = MAX2(firstLevel, lastLevel); /* need at least one level */ + minLod = tObj->BaseLevel + (GLint)(tObj->MinLod); + minLod = MAX2(minLod, tObj->BaseLevel); + minLod = MIN2(minLod, tObj->MaxLevel); + maxLod = tObj->BaseLevel + (GLint)(tObj->MaxLod + 0.5); + maxLod = MIN2(maxLod, tObj->MaxLevel); + maxLod = MIN2(maxLod, tObj->Image[0][minLod]->MaxLog2 + minLod); + maxLod = MAX2(maxLod, minLod); /* need at least one level */ } break; case GL_TEXTURE_RECTANGLE_NV: case GL_TEXTURE_4D_SGIS: - firstLevel = lastLevel = 0; + minLod = maxLod = 0; break; default: return; } + radeon_print(RADEON_TEXTURE, RADEON_TRACE, + "%s(%p) target %s, min %d, max %d.\n", + __func__, tObj, + _mesa_lookup_enum_by_nr(tObj->Target), + minLod, maxLod); + /* save these values */ - *pfirstLevel = firstLevel; - *plastLevel = lastLevel; + *pminLod = minLod; + *pmaxLod = maxLod; } - /** * Checks whether the given miptree can hold the given texture image at the * given face and level. @@ -313,23 +336,17 @@ static void calculate_first_last_level(struct gl_texture_object *tObj, GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt, struct gl_texture_image *texImage, GLuint face, GLuint level) { - GLboolean isCompressed = _mesa_is_format_compressed(texImage->TexFormat); radeon_mipmap_level *lvl; - if (face >= mt->faces || level < mt->firstLevel || level > mt->lastLevel) + if (face >= mt->faces) return GL_FALSE; - if (texImage->InternalFormat != mt->internal_format || - isCompressed != mt->compressed) + if (texImage->TexFormat != mt->mesaFormat) return GL_FALSE; - if (!isCompressed && - !mt->compressed && - _mesa_get_format_bytes(texImage->TexFormat) != mt->bpp) - return GL_FALSE; - - lvl = &mt->levels[level - mt->firstLevel]; - if (lvl->width != texImage->Width || + lvl = &mt->levels[level]; + if (!lvl->valid || + lvl->width != texImage->Width || lvl->height != texImage->Height || lvl->depth != texImage->Depth) return GL_FALSE; @@ -337,90 +354,303 @@ GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt, return GL_TRUE; } - /** * Checks whether the given miptree has the right format to store the given texture object. */ -GLboolean radeon_miptree_matches_texture(radeon_mipmap_tree *mt, struct gl_texture_object *texObj) +static GLboolean radeon_miptree_matches_texture(radeon_mipmap_tree *mt, struct gl_texture_object *texObj) { struct gl_texture_image *firstImage; - GLuint compressed; - GLuint numfaces = 1; - GLuint firstLevel, lastLevel; - GLuint texelBytes; - - calculate_first_last_level(texObj, &firstLevel, &lastLevel, 0, texObj->BaseLevel); - if (texObj->Target == GL_TEXTURE_CUBE_MAP) - numfaces = 6; - - firstImage = texObj->Image[0][firstLevel]; - compressed = _mesa_is_format_compressed(firstImage->TexFormat) ? 
firstImage->TexFormat : 0; - texelBytes = _mesa_get_format_bytes(firstImage->TexFormat); - - return (mt->firstLevel == firstLevel && - mt->lastLevel == lastLevel && - mt->width0 == firstImage->Width && - mt->height0 == firstImage->Height && - mt->depth0 == firstImage->Depth && - mt->compressed == compressed && - (!mt->compressed ? (mt->bpp == texelBytes) : 1)); -} + unsigned numLevels; + radeon_mipmap_level *mtBaseLevel; + + if (texObj->BaseLevel < mt->baseLevel) + return GL_FALSE; + + mtBaseLevel = &mt->levels[texObj->BaseLevel - mt->baseLevel]; + firstImage = texObj->Image[0][texObj->BaseLevel]; + numLevels = MIN2(texObj->MaxLevel - texObj->BaseLevel + 1, firstImage->MaxLog2 + 1); + + if (radeon_is_debug_enabled(RADEON_TEXTURE,RADEON_TRACE)) { + fprintf(stderr, "Checking if miptree %p matches texObj %p\n", mt, texObj); + fprintf(stderr, "target %d vs %d\n", mt->target, texObj->Target); + fprintf(stderr, "format %d vs %d\n", mt->mesaFormat, firstImage->TexFormat); + fprintf(stderr, "numLevels %d vs %d\n", mt->numLevels, numLevels); + fprintf(stderr, "width0 %d vs %d\n", mtBaseLevel->width, firstImage->Width); + fprintf(stderr, "height0 %d vs %d\n", mtBaseLevel->height, firstImage->Height); + fprintf(stderr, "depth0 %d vs %d\n", mtBaseLevel->depth, firstImage->Depth); + if (mt->target == texObj->Target && + mt->mesaFormat == firstImage->TexFormat && + mt->numLevels >= numLevels && + mtBaseLevel->width == firstImage->Width && + mtBaseLevel->height == firstImage->Height && + mtBaseLevel->depth == firstImage->Depth) { + fprintf(stderr, "MATCHED\n"); + } else { + fprintf(stderr, "NOT MATCHED\n"); + } + } + return (mt->target == texObj->Target && + mt->mesaFormat == firstImage->TexFormat && + mt->numLevels >= numLevels && + mtBaseLevel->width == firstImage->Width && + mtBaseLevel->height == firstImage->Height && + mtBaseLevel->depth == firstImage->Depth); +} /** - * Try to allocate a mipmap tree for the given texture that will fit the - * given image in the given position. + * Try to allocate a mipmap tree for the given texture object. + * @param[in] rmesa radeon context + * @param[in] t radeon texture object */ -void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t, - radeon_texture_image *image, GLuint face, GLuint level) +void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t) { - GLuint compressed = _mesa_is_format_compressed(image->base.TexFormat) ? image->base.TexFormat : 0; - GLuint numfaces = 1; - GLuint firstLevel, lastLevel; - GLuint texelBytes; + struct gl_texture_object *texObj = &t->base; + struct gl_texture_image *texImg = texObj->Image[0][texObj->BaseLevel]; + GLuint numLevels; assert(!t->mt); - calculate_first_last_level(&t->base, &firstLevel, &lastLevel, face, level); - if (t->base.Target == GL_TEXTURE_CUBE_MAP) - numfaces = 6; - - if (level != firstLevel || face >= numfaces) + if (!texImg) { + radeon_warning("%s(%p) No image in given texture object(%p).\n", + __func__, rmesa, t); return; + } - texelBytes = _mesa_get_format_bytes(image->base.TexFormat); - t->mt = radeon_miptree_create(rmesa, t, t->base.Target, - image->base.InternalFormat, - firstLevel, lastLevel, - image->base.Width, image->base.Height, image->base.Depth, - texelBytes, t->tile_bits, compressed); -} + numLevels = MIN2(texObj->MaxLevel - texObj->BaseLevel + 1, texImg->MaxLog2 + 1); -/* Although we use the image_offset[] array to store relative offsets - * to cube faces, Mesa doesn't know anything about this and expects - * each cube face to be treated as a separate image. 
- * - * These functions present that view to mesa: - */ -void -radeon_miptree_depth_offsets(radeon_mipmap_tree *mt, GLuint level, GLuint *offsets) -{ - if (mt->target != GL_TEXTURE_3D || mt->faces == 1) - offsets[0] = 0; - else { - int i; - for (i = 0; i < 6; i++) - offsets[i] = mt->levels[level].faces[i].offset; - } + t->mt = radeon_miptree_create(rmesa, t->base.Target, + texImg->TexFormat, texObj->BaseLevel, + numLevels, texImg->Width, texImg->Height, + texImg->Depth, t->tile_bits); } GLuint radeon_miptree_image_offset(radeon_mipmap_tree *mt, GLuint face, GLuint level) { - if (mt->target == GL_TEXTURE_CUBE_MAP_ARB) - return (mt->levels[level].faces[face].offset); - else - return mt->levels[level].faces[0].offset; + if (mt->target == GL_TEXTURE_CUBE_MAP_ARB) + return (mt->levels[level].faces[face].offset); + else + return mt->levels[level].faces[0].offset; +} + +/** + * Ensure that the given image is stored in the given miptree from now on. + */ +static void migrate_image_to_miptree(radeon_mipmap_tree *mt, + radeon_texture_image *image, + int face, int level) +{ + radeon_mipmap_level *dstlvl = &mt->levels[level]; + unsigned char *dest; + + assert(image->mt != mt); + assert(dstlvl->valid); + assert(dstlvl->width == image->base.Width); + assert(dstlvl->height == image->base.Height); + assert(dstlvl->depth == image->base.Depth); + + radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, + "%s miptree %p, image %p, face %d, level %d.\n", + __func__, mt, image, face, level); + + radeon_bo_map(mt->bo, GL_TRUE); + dest = mt->bo->ptr + dstlvl->faces[face].offset; + + if (image->mt) { + /* Format etc. should match, so we really just need a memcpy(). + * In fact, that memcpy() could be done by the hardware in many + * cases, provided that we have a proper memory manager. + */ + assert(mt->mesaFormat == image->base.TexFormat); + + radeon_mipmap_level *srclvl = &image->mt->levels[image->mtlevel]; + + assert(image->mtlevel == level); + assert(srclvl->size == dstlvl->size); + assert(srclvl->rowstride == dstlvl->rowstride); + + radeon_bo_map(image->mt->bo, GL_FALSE); + + memcpy(dest, + image->mt->bo->ptr + srclvl->faces[face].offset, + dstlvl->size); + radeon_bo_unmap(image->mt->bo); + + radeon_miptree_unreference(&image->mt); + } else if (image->base.Data) { + /* This condition should be removed, it's here to workaround + * a segfault when mapping textures during software fallbacks. + */ + radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT, + "%s Trying to map texture in sowftware fallback.\n", + __func__); + const uint32_t srcrowstride = _mesa_format_row_stride(image->base.TexFormat, image->base.Width); + uint32_t rows = image->base.Height * image->base.Depth; + + if (_mesa_is_format_compressed(image->base.TexFormat)) { + uint32_t blockWidth, blockHeight; + _mesa_get_format_block_size(image->base.TexFormat, &blockWidth, &blockHeight); + rows = (rows + blockHeight - 1) / blockHeight; + } + + copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride, + rows, srcrowstride); + + _mesa_free_texmemory(image->base.Data); + image->base.Data = 0; + } + + radeon_bo_unmap(mt->bo); + + radeon_miptree_reference(mt, &image->mt); + image->mtface = face; + image->mtlevel = level; +} + +/** + * Filter matching miptrees, and select one with the most of data. 
+ * @param[in] texObj radeon texture object + * @param[in] firstLevel first texture level to check + * @param[in] lastLevel last texture level to check + */ +static radeon_mipmap_tree * get_biggest_matching_miptree(radeonTexObj *texObj, + unsigned firstLevel, + unsigned lastLevel) +{ + const unsigned numLevels = lastLevel - firstLevel + 1; + unsigned *mtSizes = calloc(numLevels, sizeof(unsigned)); + radeon_mipmap_tree **mts = calloc(numLevels, sizeof(radeon_mipmap_tree *)); + unsigned mtCount = 0; + unsigned maxMtIndex = 0; + radeon_mipmap_tree *tmp; + + for (unsigned level = firstLevel; level <= lastLevel; ++level) { + radeon_texture_image *img = get_radeon_texture_image(texObj->base.Image[0][level]); + unsigned found = 0; + // TODO: why this hack?? + if (!img) + break; + + if (!img->mt) + continue; + + for (int i = 0; i < mtCount; ++i) { + if (mts[i] == img->mt) { + found = 1; + mtSizes[i] += img->mt->levels[img->mtlevel].size; + break; + } + } + + if (!found && radeon_miptree_matches_texture(img->mt, &texObj->base)) { + mtSizes[mtCount] = img->mt->levels[img->mtlevel].size; + mts[mtCount] = img->mt; + mtCount++; + } + } + + if (mtCount == 0) { + free(mtSizes); + free(mts); + return NULL; + } + + for (int i = 1; i < mtCount; ++i) { + if (mtSizes[i] > mtSizes[maxMtIndex]) { + maxMtIndex = i; + } + } + + tmp = mts[maxMtIndex]; + free(mtSizes); + free(mts); + + return tmp; +} + +/** + * Validate texture mipmap tree. + * If individual images are stored in different mipmap trees + * use the mipmap tree that has the most of the correct data. + */ +int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj) +{ + radeonContextPtr rmesa = RADEON_CONTEXT(ctx); + radeonTexObj *t = radeon_tex_obj(texObj); + + if (t->validated || t->image_override) { + return GL_TRUE; + } + + if (texObj->Image[0][texObj->BaseLevel]->Border > 0) + return GL_FALSE; + + _mesa_test_texobj_completeness(rmesa->glCtx, texObj); + if (!texObj->_Complete) { + return GL_FALSE; + } + + calculate_min_max_lod(&t->base, &t->minLod, &t->maxLod); + + radeon_print(RADEON_TEXTURE, RADEON_NORMAL, + "%s: Validating texture %p now, minLod = %d, maxLod = %d\n", + __FUNCTION__, texObj ,t->minLod, t->maxLod); + + radeon_mipmap_tree *dst_miptree; + dst_miptree = get_biggest_matching_miptree(t, t->minLod, t->maxLod); + + if (!dst_miptree) { + radeon_miptree_unreference(&t->mt); + radeon_try_alloc_miptree(rmesa, t); + dst_miptree = t->mt; + radeon_print(RADEON_TEXTURE, RADEON_NORMAL, + "%s: No matching miptree found, allocated new one %p\n", + __FUNCTION__, t->mt); + + } else { + radeon_print(RADEON_TEXTURE, RADEON_NORMAL, + "%s: Using miptree %p\n", __FUNCTION__, t->mt); + } + + const unsigned faces = texObj->Target == GL_TEXTURE_CUBE_MAP ? 6 : 1; + unsigned face, level; + radeon_texture_image *img; + /* Validate only the levels that will actually be used during rendering */ + for (face = 0; face < faces; ++face) { + for (level = t->minLod; level <= t->maxLod; ++level) { + img = get_radeon_texture_image(texObj->Image[face][level]); + + radeon_print(RADEON_TEXTURE, RADEON_TRACE, + "Checking image level %d, face %d, mt %p ... ", + level, face, img->mt); + + if (img->mt != dst_miptree) { + radeon_print(RADEON_TEXTURE, RADEON_TRACE, + "MIGRATING\n"); + + struct radeon_bo *src_bo = (img->mt) ? 
img->mt->bo : img->bo; + if (src_bo && radeon_bo_is_referenced_by_cs(src_bo, rmesa->cmdbuf.cs)) { + radeon_firevertices(rmesa); + } + migrate_image_to_miptree(dst_miptree, img, face, level); + } else + radeon_print(RADEON_TEXTURE, RADEON_TRACE, "OK\n"); + } + } + + t->validated = GL_TRUE; + + return GL_TRUE; +} + +uint32_t get_base_teximage_offset(radeonTexObj *texObj) +{ + if (!texObj->mt) { + return 0; + } else { + return radeon_miptree_image_offset(texObj->mt, 0, texObj->minLod); + } }
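
For readers following the new layout code above: the sketch below reproduces, outside the driver, the row-stride and image-size arithmetic that get_aligned_compressed_row_stride() and get_texture_image_size() perform for compressed formats, plus the power-of-two stride rounding used for linear formats. It is an illustrative, standalone example, not part of the patch; the block parameters (4x4 texel blocks, 8 bytes per block, DXT1-style), the 32-byte alignments and the helper names are assumptions chosen for the demo.

/* Standalone illustration of the stride/size math introduced by this
 * patch.  Block parameters (4x4 texels, 8 bytes per block, DXT1-style)
 * are assumed; helper names are illustrative, not the driver's.
 */
#include <stdio.h>

/* Mirrors get_aligned_compressed_row_stride(): bytes for one row of
 * blocks covering 'width' texels, respecting the hardware minimum
 * stride rounded up to a whole block. */
static unsigned aligned_compressed_row_stride(unsigned width,
                                              unsigned blockWidth,
                                              unsigned blockBytes,
                                              unsigned minStride)
{
	unsigned stride = (width + blockWidth - 1) / blockWidth * blockBytes;

	if (stride < minStride)
		stride = (minStride + blockBytes - 1) / blockBytes * blockBytes;

	return stride;
}

/* Mirrors the compressed branch of get_texture_image_size():
 * one row of blocks covers blockHeight texel rows. */
static unsigned compressed_image_size(unsigned rowStride, unsigned height,
                                      unsigned depth, unsigned blockHeight)
{
	return rowStride * ((height + blockHeight - 1) / blockHeight) * depth;
}

/* Mirrors the linear-format path of get_texture_image_row_stride():
 * round the natural row stride up to a power-of-two alignment. */
static unsigned aligned_linear_row_stride(unsigned untiledStride,
                                          unsigned rowAlign)
{
	return (untiledStride + rowAlign - 1) & ~(rowAlign - 1);
}

int main(void)
{
	/* 65x65 DXT1-style image: 17 blocks per row, 17 block rows. */
	unsigned cstride = aligned_compressed_row_stride(65, 4, 8, 32);
	unsigned csize   = compressed_image_size(cstride, 65, 1, 4);

	/* 65-texel-wide 4-byte-per-texel row, 32-byte alignment assumed. */
	unsigned lstride = aligned_linear_row_stride(65 * 4, 32);

	/* Expected: compressed stride 136, size 2312; linear stride 288. */
	printf("compressed: stride %u bytes, size %u bytes\n", cstride, csize);
	printf("linear:     stride %u bytes\n", lstride);
	return 0;
}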