radeon: add texture helper function
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_mipmap_tree.c
1 /*
2 * Copyright (C) 2009 Maciej Cencora.
3 * Copyright (C) 2008 Nicolai Haehnle.
4 *
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining
8 * a copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sublicense, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial
17 * portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #include "radeon_mipmap_tree.h"
30
31 #include <errno.h>
32 #include <unistd.h>
33
34 #include "main/simple_list.h"
35 #include "main/teximage.h"
36 #include "main/texobj.h"
37 #include "main/enums.h"
38 #include "radeon_texture.h"
39
40 static unsigned get_aligned_compressed_row_stride(
41 gl_format format,
42 unsigned width,
43 unsigned minStride)
44 {
45 const unsigned blockBytes = _mesa_get_format_bytes(format);
46 unsigned blockWidth, blockHeight;
47 unsigned stride;
48
49 _mesa_get_format_block_size(format, &blockWidth, &blockHeight);
50
51 /* Count number of blocks required to store the given width.
52 * And then multiple it with bytes required to store a block.
53 */
54 stride = (width + blockWidth - 1) / blockWidth * blockBytes;
55
56 /* Round the given minimum stride to the next full blocksize.
57 * (minStride + blockBytes - 1) / blockBytes * blockBytes
58 */
59 if ( stride < minStride )
60 stride = (minStride + blockBytes - 1) / blockBytes * blockBytes;
61
62 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
63 "%s width %u, minStride %u, block(bytes %u, width %u):"
64 "stride %u\n",
65 __func__, width, minStride,
66 blockBytes, blockWidth,
67 stride);
68
69 return stride;
70 }
71
72 static unsigned get_compressed_image_size(
73 gl_format format,
74 unsigned rowStride,
75 unsigned height)
76 {
77 unsigned blockWidth, blockHeight;
78
79 _mesa_get_format_block_size(format, &blockWidth, &blockHeight);
80
81 return rowStride * ((height + blockHeight - 1) / blockHeight);
82 }
83
/**
 * Return non-zero iff \p value is a power of two.
 *
 * A power of two has exactly one bit set, so clearing the lowest set bit
 * with value & (value - 1) yields zero.  Zero itself is rejected
 * explicitly, matching the previous loop-based implementation, and the
 * O(log n) loop is replaced by this O(1) bit trick.
 */
static unsigned is_pot(unsigned value)
{
	return value != 0 && (value & (value - 1)) == 0;
}
92
93 unsigned get_texture_image_row_stride(radeonContextPtr rmesa, gl_format format, unsigned width)
94 {
95 if (_mesa_is_format_compressed(format)) {
96 return get_aligned_compressed_row_stride(format, width, rmesa->texture_compressed_row_align);
97 } else {
98 unsigned row_align;
99
100 if (is_pot(width)) {
101 row_align = rmesa->texture_row_align - 1;
102 } else {
103 row_align = rmesa->texture_rect_row_align - 1;
104 }
105
106 return (_mesa_format_row_stride(format, width) + row_align) & ~row_align;
107 }
108 }
109
/**
 * Compute sizes and fill in offset and blit information for the given
 * image (determined by \p face and \p level).
 *
 * \param curOffset points to the offset at which the image is to be stored
 * and is updated by this function according to the size of the image.
 */
static void compute_tex_image_offset(radeonContextPtr rmesa, radeon_mipmap_tree *mt,
	GLuint face, GLuint level, GLuint* curOffset)
{
	radeon_mipmap_level *lvl = &mt->levels[level];
	uint32_t row_align;
	GLuint height;

	/* Height is padded to the next power of two for size calculation.
	 * NOTE(review): this also applies on the rectangle-texture branch
	 * below -- presumably over-allocation is acceptable there; confirm
	 * against the hardware requirements. */
	height = _mesa_next_pow_two_32(lvl->height);

	/* Find image size in bytes */
	if (_mesa_is_format_compressed(mt->mesaFormat)) {
		/* Compressed formats: stride counts whole blocks, size counts
		 * whole block rows. */
		lvl->rowstride = get_aligned_compressed_row_stride(mt->mesaFormat, lvl->width, rmesa->texture_compressed_row_align);
		lvl->size = get_compressed_image_size(mt->mesaFormat, lvl->rowstride, height);
	} else if (mt->target == GL_TEXTURE_RECTANGLE_NV) {
		/* Rectangle textures have their own row alignment requirement. */
		row_align = rmesa->texture_rect_row_align - 1;
		lvl->rowstride = (_mesa_format_row_stride(mt->mesaFormat, lvl->width) + row_align) & ~row_align;
		lvl->size = lvl->rowstride * height;
	} else if (mt->tilebits & RADEON_TXO_MICRO_TILE) {
		/* tile pattern is 16 bytes x2. mipmaps stay 32 byte aligned,
		 * though the actual offset may be different (if texture is less than
		 * 32 bytes width) to the untiled case */
		lvl->rowstride = (_mesa_format_row_stride(mt->mesaFormat, lvl->width) * 2 + 31) & ~31;
		lvl->size = lvl->rowstride * ((height + 1) / 2) * lvl->depth;
	} else {
		/* Plain linear layout with the regular row alignment. */
		row_align = rmesa->texture_row_align - 1;
		lvl->rowstride = (_mesa_format_row_stride(mt->mesaFormat, lvl->width) + row_align) & ~row_align;
		lvl->size = lvl->rowstride * height * lvl->depth;
	}
	assert(lvl->size > 0);

	/* All images are aligned to a 32-byte offset */
	*curOffset = (*curOffset + 0x1f) & ~0x1f;
	lvl->faces[face].offset = *curOffset;
	*curOffset += lvl->size;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p) level %d, face %d: rs:%d %dx%d at %d\n",
			__func__, rmesa,
			level, face,
			lvl->rowstride, lvl->width, height, lvl->faces[face].offset);
}
158
159 static GLuint minify(GLuint size, GLuint levels)
160 {
161 size = size >> levels;
162 if (size < 1)
163 size = 1;
164 return size;
165 }
166
167
168 static void calculate_miptree_layout_r100(radeonContextPtr rmesa, radeon_mipmap_tree *mt)
169 {
170 GLuint curOffset, i, face, level;
171
172 assert(mt->numLevels <= rmesa->glCtx->Const.MaxTextureLevels);
173
174 curOffset = 0;
175 for(face = 0; face < mt->faces; face++) {
176
177 for(i = 0, level = mt->baseLevel; i < mt->numLevels; i++, level++) {
178 mt->levels[level].valid = 1;
179 mt->levels[level].width = minify(mt->width0, i);
180 mt->levels[level].height = minify(mt->height0, i);
181 mt->levels[level].depth = minify(mt->depth0, i);
182 compute_tex_image_offset(rmesa, mt, face, level, &curOffset);
183 }
184 }
185
186 /* Note the required size in memory */
187 mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
188
189 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
190 "%s(%p, %p) total size %d\n",
191 __func__, rmesa, mt, mt->totalsize);
192 }
193
/**
 * Lay out all images of the miptree for r300+ hardware.
 *
 * Images are stored level-major: all faces of level N are consecutive
 * (note the face loop is the inner one, unlike the r100 layout).  Sets
 * per-level dimensions/offsets and the total buffer size required.
 */
static void calculate_miptree_layout_r300(radeonContextPtr rmesa, radeon_mipmap_tree *mt)
{
	GLuint curOffset, i, level;

	assert(mt->numLevels <= rmesa->glCtx->Const.MaxTextureLevels);

	curOffset = 0;
	for(i = 0, level = mt->baseLevel; i < mt->numLevels; i++, level++) {
		GLuint face;

		mt->levels[level].valid = 1;
		mt->levels[level].width = minify(mt->width0, i);
		mt->levels[level].height = minify(mt->height0, i);
		mt->levels[level].depth = minify(mt->depth0, i);

		for(face = 0; face < mt->faces; face++)
			compute_tex_image_offset(rmesa, mt, face, level, &curOffset);
		/* r600 cube levels seems to be aligned to 8 faces but
		 * we have separate register for 1'st level offset so add
		 * 2 image alignment after 1'st mip level */
		if(rmesa->radeonScreen->chip_family >= CHIP_FAMILY_R600 &&
		   mt->target == GL_TEXTURE_CUBE_MAP && level >= 1)
			curOffset += 2 * mt->levels[level].size;
	}

	/* Note the required size in memory */
	mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p, %p) total size %d\n",
			__func__, rmesa, mt, mt->totalsize);
}
226
227 /**
228 * Create a new mipmap tree, calculate its layout and allocate memory.
229 */
230 static radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa,
231 GLenum target, gl_format mesaFormat, GLuint baseLevel, GLuint numLevels,
232 GLuint width0, GLuint height0, GLuint depth0, GLuint tilebits)
233 {
234 radeon_mipmap_tree *mt = CALLOC_STRUCT(_radeon_mipmap_tree);
235
236 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
237 "%s(%p) new tree is %p.\n",
238 __func__, rmesa, mt);
239
240 mt->mesaFormat = mesaFormat;
241 mt->refcount = 1;
242 mt->target = target;
243 mt->faces = (target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
244 mt->baseLevel = baseLevel;
245 mt->numLevels = numLevels;
246 mt->width0 = width0;
247 mt->height0 = height0;
248 mt->depth0 = depth0;
249 mt->tilebits = tilebits;
250
251 if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_R300)
252 calculate_miptree_layout_r300(rmesa, mt);
253 else
254 calculate_miptree_layout_r100(rmesa, mt);
255
256 mt->bo = radeon_bo_open(rmesa->radeonScreen->bom,
257 0, mt->totalsize, 1024,
258 RADEON_GEM_DOMAIN_VRAM,
259 0);
260
261 return mt;
262 }
263
264 void radeon_miptree_reference(radeon_mipmap_tree *mt, radeon_mipmap_tree **ptr)
265 {
266 assert(!*ptr);
267
268 mt->refcount++;
269 assert(mt->refcount > 0);
270
271 *ptr = mt;
272 }
273
274 void radeon_miptree_unreference(radeon_mipmap_tree **ptr)
275 {
276 radeon_mipmap_tree *mt = *ptr;
277 if (!mt)
278 return;
279
280 assert(mt->refcount > 0);
281
282 mt->refcount--;
283 if (!mt->refcount) {
284 radeon_bo_unref(mt->bo);
285 free(mt);
286 }
287
288 *ptr = 0;
289 }
290
/**
 * Calculate min and max LOD for the given texture object.
 * @param[in] tObj texture object whose LOD values to calculate
 * @param[out] pminLod minimal LOD
 * @param[out] pmaxLod maximal LOD
 *
 * Note: for unsupported targets the function returns early without
 * writing the output parameters, so callers must pre-initialize them.
 */
static void calculate_min_max_lod(struct gl_texture_object *tObj,
			          unsigned *pminLod, unsigned *pmaxLod)
{
	int minLod, maxLod;
	/* Yes, this looks overly complicated, but it's all needed.
	*/
	switch (tObj->Target) {
	case GL_TEXTURE_1D:
	case GL_TEXTURE_2D:
	case GL_TEXTURE_3D:
	case GL_TEXTURE_CUBE_MAP:
		if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) {
			/* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
			*/
			minLod = maxLod = tObj->BaseLevel;
		} else {
			/* Mipmapped minification: clamp the user's
			 * [MinLod, MaxLod] range into [BaseLevel, MaxLevel]. */
			minLod = tObj->BaseLevel + (GLint)(tObj->MinLod);
			minLod = MAX2(minLod, tObj->BaseLevel);
			minLod = MIN2(minLod, tObj->MaxLevel);
			/* +0.5 rounds MaxLod to the nearest integer level. */
			maxLod = tObj->BaseLevel + (GLint)(tObj->MaxLod + 0.5);
			maxLod = MIN2(maxLod, tObj->MaxLevel);
			/* Cannot go past the smallest mip the base image can
			 * produce.  NOTE(review): assumes Image[0][minLod] is
			 * non-NULL -- presumably guaranteed by the completeness
			 * check in the caller; confirm. */
			maxLod = MIN2(maxLod, tObj->Image[0][minLod]->MaxLog2 + minLod);
			maxLod = MAX2(maxLod, minLod); /* need at least one level */
		}
		break;
	case GL_TEXTURE_RECTANGLE_NV:
	case GL_TEXTURE_4D_SGIS:
		/* These targets never have mipmaps. */
		minLod = maxLod = 0;
		break;
	default:
		return;
	}

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p) target %s, min %d, max %d.\n",
			__func__, tObj,
			_mesa_lookup_enum_by_nr(tObj->Target),
			minLod, maxLod);

	/* save these values */
	*pminLod = minLod;
	*pmaxLod = maxLod;
}
340
341 /**
342 * Checks whether the given miptree can hold the given texture image at the
343 * given face and level.
344 */
345 GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt,
346 struct gl_texture_image *texImage, GLuint face, GLuint level)
347 {
348 radeon_mipmap_level *lvl;
349
350 if (face >= mt->faces)
351 return GL_FALSE;
352
353 if (texImage->TexFormat != mt->mesaFormat)
354 return GL_FALSE;
355
356 lvl = &mt->levels[level];
357 if (!lvl->valid ||
358 lvl->width != texImage->Width ||
359 lvl->height != texImage->Height ||
360 lvl->depth != texImage->Depth)
361 return GL_FALSE;
362
363 return GL_TRUE;
364 }
365
366 /**
367 * Checks whether the given miptree has the right format to store the given texture object.
368 */
369 static GLboolean radeon_miptree_matches_texture(radeon_mipmap_tree *mt, struct gl_texture_object *texObj)
370 {
371 struct gl_texture_image *firstImage;
372 unsigned numLevels;
373 radeon_mipmap_level *mtBaseLevel;
374
375 if (texObj->BaseLevel < mt->baseLevel)
376 return GL_FALSE;
377
378 mtBaseLevel = &mt->levels[texObj->BaseLevel - mt->baseLevel];
379 firstImage = texObj->Image[0][texObj->BaseLevel];
380 numLevels = MIN2(texObj->MaxLevel - texObj->BaseLevel + 1, firstImage->MaxLog2 + 1);
381
382 if (radeon_is_debug_enabled(RADEON_TEXTURE,RADEON_TRACE)) {
383 fprintf(stderr, "Checking if miptree %p matches texObj %p\n", mt, texObj);
384 fprintf(stderr, "target %d vs %d\n", mt->target, texObj->Target);
385 fprintf(stderr, "format %d vs %d\n", mt->mesaFormat, firstImage->TexFormat);
386 fprintf(stderr, "numLevels %d vs %d\n", mt->numLevels, numLevels);
387 fprintf(stderr, "width0 %d vs %d\n", mtBaseLevel->width, firstImage->Width);
388 fprintf(stderr, "height0 %d vs %d\n", mtBaseLevel->height, firstImage->Height);
389 fprintf(stderr, "depth0 %d vs %d\n", mtBaseLevel->depth, firstImage->Depth);
390 if (mt->target == texObj->Target &&
391 mt->mesaFormat == firstImage->TexFormat &&
392 mt->numLevels >= numLevels &&
393 mtBaseLevel->width == firstImage->Width &&
394 mtBaseLevel->height == firstImage->Height &&
395 mtBaseLevel->depth == firstImage->Depth) {
396 fprintf(stderr, "MATCHED\n");
397 } else {
398 fprintf(stderr, "NOT MATCHED\n");
399 }
400 }
401
402 return (mt->target == texObj->Target &&
403 mt->mesaFormat == firstImage->TexFormat &&
404 mt->numLevels >= numLevels &&
405 mtBaseLevel->width == firstImage->Width &&
406 mtBaseLevel->height == firstImage->Height &&
407 mtBaseLevel->depth == firstImage->Depth);
408 }
409
410 /**
411 * Try to allocate a mipmap tree for the given texture object.
412 * @param[in] rmesa radeon context
413 * @param[in] t radeon texture object
414 */
415 void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t)
416 {
417 struct gl_texture_object *texObj = &t->base;
418 struct gl_texture_image *texImg = texObj->Image[0][texObj->BaseLevel];
419 GLuint numLevels;
420
421 assert(!t->mt);
422
423 if (!texImg) {
424 radeon_warning("%s(%p) No image in given texture object(%p).\n",
425 __func__, rmesa, t);
426 return;
427 }
428
429
430 numLevels = MIN2(texObj->MaxLevel - texObj->BaseLevel + 1, texImg->MaxLog2 + 1);
431
432 t->mt = radeon_miptree_create(rmesa, t->base.Target,
433 texImg->TexFormat, texObj->BaseLevel,
434 numLevels, texImg->Width, texImg->Height,
435 texImg->Depth, t->tile_bits);
436 }
437
438 GLuint
439 radeon_miptree_image_offset(radeon_mipmap_tree *mt,
440 GLuint face, GLuint level)
441 {
442 if (mt->target == GL_TEXTURE_CUBE_MAP_ARB)
443 return (mt->levels[level].faces[face].offset);
444 else
445 return mt->levels[level].faces[0].offset;
446 }
447
/**
 * Ensure that the given image is stored in the given miptree from now on.
 *
 * Copies the image's pixels into \p mt (either from the image's previous
 * miptree or from its malloc'ed system-memory buffer), then re-points the
 * image at \p mt, taking a reference on it.
 */
static void migrate_image_to_miptree(radeon_mipmap_tree *mt,
				     radeon_texture_image *image,
				     int face, int level)
{
	radeon_mipmap_level *dstlvl = &mt->levels[level];
	unsigned char *dest;

	/* The destination level must already be laid out to match the image. */
	assert(image->mt != mt);
	assert(dstlvl->valid);
	assert(dstlvl->width == image->base.Width);
	assert(dstlvl->height == image->base.Height);
	assert(dstlvl->depth == image->base.Depth);

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s miptree %p, image %p, face %d, level %d.\n",
			__func__, mt, image, face, level);

	/* Map the destination buffer for writing. */
	radeon_bo_map(mt->bo, GL_TRUE);
	dest = mt->bo->ptr + dstlvl->faces[face].offset;

	if (image->mt) {
		/* Format etc. should match, so we really just need a memcpy().
		 * In fact, that memcpy() could be done by the hardware in many
		 * cases, provided that we have a proper memory manager.
		 */
		assert(mt->mesaFormat == image->base.TexFormat);

		radeon_mipmap_level *srclvl = &image->mt->levels[image->mtlevel];

		/* TODO: bring back these assertions once the FBOs are fixed */
#if 0
		assert(image->mtlevel == level);
		assert(srclvl->size == dstlvl->size);
		assert(srclvl->rowstride == dstlvl->rowstride);
#endif

		/* Map the source read-only, copy, then drop the old tree. */
		radeon_bo_map(image->mt->bo, GL_FALSE);

		memcpy(dest,
			image->mt->bo->ptr + srclvl->faces[face].offset,
			dstlvl->size);
		radeon_bo_unmap(image->mt->bo);

		radeon_miptree_unreference(&image->mt);
	} else if (image->base.Data) {
		/* This condition should be removed, it's here to workaround
		 * a segfault when mapping textures during software fallbacks.
		 */
		radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
				"%s Trying to map texture in sowftware fallback.\n",
				__func__);
		const uint32_t srcrowstride = _mesa_format_row_stride(image->base.TexFormat, image->base.Width);
		uint32_t rows = image->base.Height * image->base.Depth;

		/* Compressed formats store whole block rows, not pixel rows. */
		if (_mesa_is_format_compressed(image->base.TexFormat)) {
			uint32_t blockWidth, blockHeight;
			_mesa_get_format_block_size(image->base.TexFormat, &blockWidth, &blockHeight);
			rows = (rows + blockHeight - 1) / blockHeight;
		}

		copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride,
				rows, srcrowstride);

		/* The system-memory copy is no longer needed. */
		_mesa_free_texmemory(image->base.Data);
		image->base.Data = 0;
	}
	/* NOTE(review): if the image has neither a source miptree nor Data,
	 * nothing is copied and the destination level keeps whatever the
	 * buffer already contained. */

	radeon_bo_unmap(mt->bo);

	/* Hand the image over to the new tree (takes a reference). */
	radeon_miptree_reference(mt, &image->mt);
	image->mtface = face;
	image->mtlevel = level;
}
524
525 /**
526 * Filter matching miptrees, and select one with the most of data.
527 * @param[in] texObj radeon texture object
528 * @param[in] firstLevel first texture level to check
529 * @param[in] lastLevel last texture level to check
530 */
531 static radeon_mipmap_tree * get_biggest_matching_miptree(radeonTexObj *texObj,
532 unsigned firstLevel,
533 unsigned lastLevel)
534 {
535 const unsigned numLevels = lastLevel - firstLevel + 1;
536 unsigned *mtSizes = calloc(numLevels, sizeof(unsigned));
537 radeon_mipmap_tree **mts = calloc(numLevels, sizeof(radeon_mipmap_tree *));
538 unsigned mtCount = 0;
539 unsigned maxMtIndex = 0;
540 radeon_mipmap_tree *tmp;
541
542 for (unsigned level = firstLevel; level <= lastLevel; ++level) {
543 radeon_texture_image *img = get_radeon_texture_image(texObj->base.Image[0][level]);
544 unsigned found = 0;
545 // TODO: why this hack??
546 if (!img)
547 break;
548
549 if (!img->mt)
550 continue;
551
552 for (int i = 0; i < mtCount; ++i) {
553 if (mts[i] == img->mt) {
554 found = 1;
555 mtSizes[i] += img->mt->levels[img->mtlevel].size;
556 break;
557 }
558 }
559
560 if (!found && radeon_miptree_matches_texture(img->mt, &texObj->base)) {
561 mtSizes[mtCount] = img->mt->levels[img->mtlevel].size;
562 mts[mtCount] = img->mt;
563 mtCount++;
564 }
565 }
566
567 if (mtCount == 0) {
568 return NULL;
569 }
570
571 for (int i = 1; i < mtCount; ++i) {
572 if (mtSizes[i] > mtSizes[maxMtIndex]) {
573 maxMtIndex = i;
574 }
575 }
576
577 tmp = mts[maxMtIndex];
578 free(mtSizes);
579 free(mts);
580
581 return tmp;
582 }
583
/**
 * Validate texture mipmap tree.
 * If individual images are stored in different mipmap trees
 * use the mipmap tree that has the most of the correct data.
 *
 * \return GL_TRUE on success, GL_FALSE if the texture cannot be
 *         validated (bordered base image or incomplete texture).
 */
int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);

	/* Nothing to do if already validated or the image storage is
	 * externally overridden. */
	if (t->validated || t->image_override) {
		return GL_TRUE;
	}

	/* Texture borders are not supported by this hardware path. */
	if (texObj->Image[0][texObj->BaseLevel]->Border > 0)
		return GL_FALSE;

	_mesa_test_texobj_completeness(rmesa->glCtx, texObj);
	if (!texObj->_Complete) {
		return GL_FALSE;
	}

	/* Determine which levels will actually be sampled. */
	calculate_min_max_lod(&t->base, &t->minLod, &t->maxLod);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: Validating texture %p now, minLod = %d, maxLod = %d\n",
			__FUNCTION__, texObj ,t->minLod, t->maxLod);

	/* Prefer an existing miptree that already holds the most data;
	 * otherwise allocate a fresh one. */
	radeon_mipmap_tree *dst_miptree;
	dst_miptree = get_biggest_matching_miptree(t, t->minLod, t->maxLod);

	if (!dst_miptree) {
		radeon_miptree_unreference(&t->mt);
		radeon_try_alloc_miptree(rmesa, t);
		dst_miptree = t->mt;
		/* NOTE(review): if radeon_try_alloc_miptree() fails, t->mt and
		 * dst_miptree stay NULL and migrate_image_to_miptree() below is
		 * still called -- presumably allocation failure is fatal here
		 * anyway; confirm. */
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: No matching miptree found, allocated new one %p\n",
			__FUNCTION__, t->mt);

	} else {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: Using miptree %p\n", __FUNCTION__, t->mt);
	}

	const unsigned faces = texObj->Target == GL_TEXTURE_CUBE_MAP ? 6 : 1;
	unsigned face, level;
	radeon_texture_image *img;
	/* Validate only the levels that will actually be used during rendering */
	for (face = 0; face < faces; ++face) {
		for (level = t->minLod; level <= t->maxLod; ++level) {
			img = get_radeon_texture_image(texObj->Image[face][level]);

			radeon_print(RADEON_TEXTURE, RADEON_TRACE,
				"Checking image level %d, face %d, mt %p ... ",
				level, face, img->mt);

			if (img->mt != dst_miptree) {
				radeon_print(RADEON_TEXTURE, RADEON_TRACE,
					"MIGRATING\n");

				/* Flush any pending rendering that references the
				 * source buffer before copying out of it. */
				struct radeon_bo *src_bo = (img->mt) ? img->mt->bo : img->bo;
				if (src_bo && radeon_bo_is_referenced_by_cs(src_bo, rmesa->cmdbuf.cs)) {
					radeon_firevertices(rmesa);
				}
				migrate_image_to_miptree(dst_miptree, img, face, level);
			} else
				radeon_print(RADEON_TEXTURE, RADEON_TRACE, "OK\n");
		}
	}

	t->validated = GL_TRUE;

	return GL_TRUE;
}
658
659 uint32_t get_base_teximage_offset(radeonTexObj *texObj)
660 {
661 if (!texObj->mt) {
662 return 0;
663 } else {
664 return radeon_miptree_image_offset(texObj->mt, 0, texObj->minLod);
665 }
666 }