/*
 * Copyright (C) 2008 Nicolai Haehnle.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
28 #include "radeon_mipmap_tree.h"
33 #include "main/simple_list.h"
34 #include "main/texcompress.h"
35 #include "main/texformat.h"
37 static GLuint
radeon_compressed_texture_size(GLcontext
*ctx
,
38 GLsizei width
, GLsizei height
, GLsizei depth
,
41 GLuint size
= _mesa_compressed_texture_size(ctx
, width
, height
, depth
, mesaFormat
);
43 if (mesaFormat
== MESA_FORMAT_RGB_DXT1
||
44 mesaFormat
== MESA_FORMAT_RGBA_DXT1
) {
45 if (width
+ 3 < 8) /* width one block */
47 else if (width
+ 3 < 16)
50 /* DXT3/5, 16 bytes per block */
51 // WARN_ONCE("DXT 3/5 suffers from multitexturing problems!\n");
60 static int radeon_compressed_num_bytes(GLuint mesaFormat
)
65 case MESA_FORMAT_RGB_FXT1
:
66 case MESA_FORMAT_RGBA_FXT1
:
67 case MESA_FORMAT_RGB_DXT1
:
68 case MESA_FORMAT_RGBA_DXT1
:
72 case MESA_FORMAT_RGBA_DXT3
:
73 case MESA_FORMAT_RGBA_DXT5
:
83 * Compute sizes and fill in offset and blit information for the given
84 * image (determined by \p face and \p level).
86 * \param curOffset points to the offset at which the image is to be stored
87 * and is updated by this function according to the size of the image.
89 static void compute_tex_image_offset(radeonContextPtr rmesa
, radeon_mipmap_tree
*mt
,
90 GLuint face
, GLuint level
, GLuint
* curOffset
)
92 radeon_mipmap_level
*lvl
= &mt
->levels
[level
];
95 /* Find image size in bytes */
97 /* TODO: Is this correct? Need test cases for compressed textures! */
98 row_align
= rmesa
->texture_compressed_row_align
- 1;
99 lvl
->rowstride
= (lvl
->width
* mt
->bpp
+ row_align
) & ~row_align
;
100 lvl
->size
= radeon_compressed_texture_size(mt
->radeon
->glCtx
,
101 lvl
->width
, lvl
->height
, lvl
->depth
, mt
->compressed
);
102 } else if (mt
->target
== GL_TEXTURE_RECTANGLE_NV
) {
103 row_align
= rmesa
->texture_rect_row_align
- 1;
104 lvl
->rowstride
= (lvl
->width
* mt
->bpp
+ row_align
) & ~row_align
;
105 lvl
->size
= lvl
->rowstride
* lvl
->height
;
106 } else if (mt
->tilebits
& RADEON_TXO_MICRO_TILE
) {
107 /* tile pattern is 16 bytes x2. mipmaps stay 32 byte aligned,
108 * though the actual offset may be different (if texture is less than
109 * 32 bytes width) to the untiled case */
110 lvl
->rowstride
= (lvl
->width
* mt
->bpp
* 2 + 31) & ~31;
111 lvl
->size
= lvl
->rowstride
* ((lvl
->height
+ 1) / 2) * lvl
->depth
;
113 row_align
= rmesa
->texture_row_align
- 1;
114 lvl
->rowstride
= (lvl
->width
* mt
->bpp
+ row_align
) & ~row_align
;
115 lvl
->size
= lvl
->rowstride
* lvl
->height
* lvl
->depth
;
117 assert(lvl
->size
> 0);
119 /* All images are aligned to a 32-byte offset */
120 *curOffset
= (*curOffset
+ 0x1f) & ~0x1f;
121 lvl
->faces
[face
].offset
= *curOffset
;
122 *curOffset
+= lvl
->size
;
124 if (RADEON_DEBUG
& DEBUG_TEXTURE
)
126 "level %d, face %d: rs:%d %dx%d at %d\n",
127 level
, face
, lvl
->rowstride
, lvl
->width
, lvl
->height
, lvl
->faces
[face
].offset
);
130 static GLuint
minify(GLuint size
, GLuint levels
)
132 size
= size
>> levels
;
139 static void calculate_miptree_layout_r100(radeonContextPtr rmesa
, radeon_mipmap_tree
*mt
)
146 numLevels
= mt
->lastLevel
- mt
->firstLevel
+ 1;
147 assert(numLevels
<= rmesa
->glCtx
->Const
.MaxTextureLevels
);
150 for(face
= 0; face
< mt
->faces
; face
++) {
152 for(i
= 0; i
< numLevels
; i
++) {
153 mt
->levels
[i
].width
= minify(mt
->width0
, i
);
154 mt
->levels
[i
].height
= minify(mt
->height0
, i
);
155 mt
->levels
[i
].depth
= minify(mt
->depth0
, i
);
156 compute_tex_image_offset(rmesa
, mt
, face
, i
, &curOffset
);
160 /* Note the required size in memory */
161 mt
->totalsize
= (curOffset
+ RADEON_OFFSET_MASK
) & ~RADEON_OFFSET_MASK
;
164 static void calculate_miptree_layout_r300(radeonContextPtr rmesa
, radeon_mipmap_tree
*mt
)
170 numLevels
= mt
->lastLevel
- mt
->firstLevel
+ 1;
171 assert(numLevels
<= rmesa
->glCtx
->Const
.MaxTextureLevels
);
174 for(i
= 0; i
< numLevels
; i
++) {
177 mt
->levels
[i
].width
= minify(mt
->width0
, i
);
178 mt
->levels
[i
].height
= minify(mt
->height0
, i
);
179 mt
->levels
[i
].depth
= minify(mt
->depth0
, i
);
181 for(face
= 0; face
< mt
->faces
; face
++)
182 compute_tex_image_offset(rmesa
, mt
, face
, i
, &curOffset
);
185 /* Note the required size in memory */
186 mt
->totalsize
= (curOffset
+ RADEON_OFFSET_MASK
) & ~RADEON_OFFSET_MASK
;
190 * Create a new mipmap tree, calculate its layout and allocate memory.
192 radeon_mipmap_tree
* radeon_miptree_create(radeonContextPtr rmesa
, radeonTexObj
*t
,
193 GLenum target
, GLuint firstLevel
, GLuint lastLevel
,
194 GLuint width0
, GLuint height0
, GLuint depth0
,
195 GLuint bpp
, GLuint tilebits
, GLuint compressed
)
197 radeon_mipmap_tree
*mt
= CALLOC_STRUCT(_radeon_mipmap_tree
);
203 mt
->faces
= (target
== GL_TEXTURE_CUBE_MAP
) ? 6 : 1;
204 mt
->firstLevel
= firstLevel
;
205 mt
->lastLevel
= lastLevel
;
207 mt
->height0
= height0
;
209 mt
->bpp
= compressed
? radeon_compressed_num_bytes(compressed
) : bpp
;
210 mt
->tilebits
= tilebits
;
211 mt
->compressed
= compressed
;
213 if (rmesa
->radeonScreen
->chip_family
>= CHIP_FAMILY_R300
)
214 calculate_miptree_layout_r300(rmesa
, mt
);
216 calculate_miptree_layout_r100(rmesa
, mt
);
218 mt
->bo
= radeon_bo_open(rmesa
->radeonScreen
->bom
,
219 0, mt
->totalsize
, 1024,
220 RADEON_GEM_DOMAIN_VRAM
,
226 void radeon_miptree_reference(radeon_mipmap_tree
*mt
)
229 assert(mt
->refcount
> 0);
232 void radeon_miptree_unreference(radeon_mipmap_tree
*mt
)
237 assert(mt
->refcount
> 0);
240 radeon_bo_unref(mt
->bo
);
247 * Calculate first and last mip levels for the given texture object,
248 * where the dimensions are taken from the given texture image at
251 * Note: level is the OpenGL level number, which is not necessarily the same
252 * as the first level that is actually present.
254 * The base level image of the given texture face must be non-null,
257 static void calculate_first_last_level(struct gl_texture_object
*tObj
,
258 GLuint
*pfirstLevel
, GLuint
*plastLevel
,
259 GLuint face
, GLuint level
)
261 const struct gl_texture_image
* const baseImage
=
262 tObj
->Image
[face
][level
];
266 /* These must be signed values. MinLod and MaxLod can be negative numbers,
267 * and having firstLevel and lastLevel as signed prevents the need for
273 /* Yes, this looks overly complicated, but it's all needed.
275 switch (tObj
->Target
) {
279 case GL_TEXTURE_CUBE_MAP
:
280 if (tObj
->MinFilter
== GL_NEAREST
|| tObj
->MinFilter
== GL_LINEAR
) {
281 /* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
283 firstLevel
= lastLevel
= tObj
->BaseLevel
;
285 firstLevel
= tObj
->BaseLevel
+ (GLint
)(tObj
->MinLod
+ 0.5);
286 firstLevel
= MAX2(firstLevel
, tObj
->BaseLevel
);
287 firstLevel
= MIN2(firstLevel
, level
+ baseImage
->MaxLog2
);
288 lastLevel
= tObj
->BaseLevel
+ (GLint
)(tObj
->MaxLod
+ 0.5);
289 lastLevel
= MAX2(lastLevel
, tObj
->BaseLevel
);
290 lastLevel
= MIN2(lastLevel
, level
+ baseImage
->MaxLog2
);
291 lastLevel
= MIN2(lastLevel
, tObj
->MaxLevel
);
292 lastLevel
= MAX2(firstLevel
, lastLevel
); /* need at least one level */
295 case GL_TEXTURE_RECTANGLE_NV
:
296 case GL_TEXTURE_4D_SGIS
:
297 firstLevel
= lastLevel
= 0;
303 /* save these values */
304 *pfirstLevel
= firstLevel
;
305 *plastLevel
= lastLevel
;
310 * Checks whether the given miptree can hold the given texture image at the
311 * given face and level.
313 GLboolean
radeon_miptree_matches_image(radeon_mipmap_tree
*mt
,
314 struct gl_texture_image
*texImage
, GLuint face
, GLuint level
)
316 radeon_mipmap_level
*lvl
;
318 if (face
>= mt
->faces
|| level
< mt
->firstLevel
|| level
> mt
->lastLevel
)
321 if ((!texImage
->IsCompressed
&& mt
->compressed
) ||
322 (texImage
->IsCompressed
&& !mt
->compressed
))
325 if (!texImage
->IsCompressed
&&
327 texImage
->TexFormat
->TexelBytes
!= mt
->bpp
)
330 lvl
= &mt
->levels
[level
- mt
->firstLevel
];
331 if (lvl
->width
!= texImage
->Width
||
332 lvl
->height
!= texImage
->Height
||
333 lvl
->depth
!= texImage
->Depth
)
341 * Checks whether the given miptree has the right format to store the given texture object.
343 GLboolean
radeon_miptree_matches_texture(radeon_mipmap_tree
*mt
, struct gl_texture_object
*texObj
)
345 struct gl_texture_image
*firstImage
;
348 GLuint firstLevel
, lastLevel
;
350 calculate_first_last_level(texObj
, &firstLevel
, &lastLevel
, 0, texObj
->BaseLevel
);
351 if (texObj
->Target
== GL_TEXTURE_CUBE_MAP
)
354 firstImage
= texObj
->Image
[0][firstLevel
];
355 compressed
= firstImage
->IsCompressed
? firstImage
->TexFormat
->MesaFormat
: 0;
357 return (mt
->firstLevel
== firstLevel
&&
358 mt
->lastLevel
== lastLevel
&&
359 mt
->width0
== firstImage
->Width
&&
360 mt
->height0
== firstImage
->Height
&&
361 mt
->depth0
== firstImage
->Depth
&&
362 mt
->compressed
== compressed
&&
363 (!mt
->compressed
? (mt
->bpp
== firstImage
->TexFormat
->TexelBytes
) : 1));
368 * Try to allocate a mipmap tree for the given texture that will fit the
369 * given image in the given position.
371 void radeon_try_alloc_miptree(radeonContextPtr rmesa
, radeonTexObj
*t
,
372 struct gl_texture_image
*texImage
, GLuint face
, GLuint level
)
374 GLuint compressed
= texImage
->IsCompressed
? texImage
->TexFormat
->MesaFormat
: 0;
376 GLuint firstLevel
, lastLevel
;
380 calculate_first_last_level(&t
->base
, &firstLevel
, &lastLevel
, face
, level
);
381 if (t
->base
.Target
== GL_TEXTURE_CUBE_MAP
)
384 if (level
!= firstLevel
|| face
>= numfaces
)
387 t
->mt
= radeon_miptree_create(rmesa
, t
, t
->base
.Target
,
388 firstLevel
, lastLevel
,
389 texImage
->Width
, texImage
->Height
, texImage
->Depth
,
390 texImage
->TexFormat
->TexelBytes
, t
->tile_bits
, compressed
);
393 /* Although we use the image_offset[] array to store relative offsets
394 * to cube faces, Mesa doesn't know anything about this and expects
395 * each cube face to be treated as a separate image.
397 * These functions present that view to mesa:
400 radeon_miptree_depth_offsets(radeon_mipmap_tree
*mt
, GLuint level
, GLuint
*offsets
)
402 if (mt
->target
!= GL_TEXTURE_3D
|| mt
->faces
== 1)
406 for (i
= 0; i
< 6; i
++)
407 offsets
[i
] = mt
->levels
[level
].faces
[i
].offset
;
412 radeon_miptree_image_offset(radeon_mipmap_tree
*mt
,
413 GLuint face
, GLuint level
)
415 if (mt
->target
== GL_TEXTURE_CUBE_MAP_ARB
)
416 return (mt
->levels
[level
].faces
[face
].offset
);
418 return mt
->levels
[level
].faces
[0].offset
;