radeon: minor refactoring of mipmap code
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_mipmap_tree.c
1 /*
2 * Copyright (C) 2009 Maciej Cencora.
3 * Copyright (C) 2008 Nicolai Haehnle.
4 *
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining
8 * a copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sublicense, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial
17 * portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #include "radeon_mipmap_tree.h"
30
31 #include <errno.h>
32 #include <unistd.h>
33
34 #include "main/simple_list.h"
35 #include "main/teximage.h"
36 #include "main/texobj.h"
37 #include "main/enums.h"
38 #include "radeon_texture.h"
39 #include "radeon_tile.h"
40
41 static unsigned get_aligned_compressed_row_stride(
42 gl_format format,
43 unsigned width,
44 unsigned minStride)
45 {
46 const unsigned blockBytes = _mesa_get_format_bytes(format);
47 unsigned blockWidth, blockHeight;
48 unsigned stride;
49
50 _mesa_get_format_block_size(format, &blockWidth, &blockHeight);
51
52 /* Count number of blocks required to store the given width.
53 * And then multiple it with bytes required to store a block.
54 */
55 stride = (width + blockWidth - 1) / blockWidth * blockBytes;
56
57 /* Round the given minimum stride to the next full blocksize.
58 * (minStride + blockBytes - 1) / blockBytes * blockBytes
59 */
60 if ( stride < minStride )
61 stride = (minStride + blockBytes - 1) / blockBytes * blockBytes;
62
63 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
64 "%s width %u, minStride %u, block(bytes %u, width %u):"
65 "stride %u\n",
66 __func__, width, minStride,
67 blockBytes, blockWidth,
68 stride);
69
70 return stride;
71 }
72
73 unsigned get_texture_image_size(
74 gl_format format,
75 unsigned rowStride,
76 unsigned height,
77 unsigned depth,
78 unsigned tiling)
79 {
80 if (_mesa_is_format_compressed(format)) {
81 unsigned blockWidth, blockHeight;
82
83 _mesa_get_format_block_size(format, &blockWidth, &blockHeight);
84
85 return rowStride * ((height + blockHeight - 1) / blockHeight) * depth;
86 } else if (tiling) {
87 /* Need to align height to tile height */
88 unsigned tileWidth, tileHeight;
89
90 get_tile_size(format, &tileWidth, &tileHeight);
91 tileHeight--;
92
93 height = (height + tileHeight) & ~tileHeight;
94 }
95
96 return rowStride * height * depth;
97 }
98
/**
 * Return non-zero iff @value is a power of two (zero is not).
 *
 * The previous implementation doubled a counter until it reached @value;
 * for any non-power-of-two value greater than 2^31 the counter wrapped
 * to zero and the loop never terminated.  The standard bit trick below
 * is O(1) and correct over the full unsigned range.
 */
static unsigned is_pot(unsigned value)
{
	return value != 0 && (value & (value - 1)) == 0;
}
107
108 unsigned get_texture_image_row_stride(radeonContextPtr rmesa, gl_format format, unsigned width)
109 {
110 if (_mesa_is_format_compressed(format)) {
111 return get_aligned_compressed_row_stride(format, width, rmesa->texture_compressed_row_align);
112 } else {
113 unsigned row_align;
114
115 if (is_pot(width)) {
116 row_align = rmesa->texture_row_align - 1;
117 } else {
118 row_align = rmesa->texture_rect_row_align - 1;
119 }
120
121 return (_mesa_format_row_stride(format, width) + row_align) & ~row_align;
122 }
123 }
124
125 /**
126 * Compute sizes and fill in offset and blit information for the given
127 * image (determined by \p face and \p level).
128 *
129 * \param curOffset points to the offset at which the image is to be stored
130 * and is updated by this function according to the size of the image.
131 */
132 static void compute_tex_image_offset(radeonContextPtr rmesa, radeon_mipmap_tree *mt,
133 GLuint face, GLuint level, GLuint* curOffset)
134 {
135 radeon_mipmap_level *lvl = &mt->levels[level];
136 GLuint height;
137
138 height = _mesa_next_pow_two_32(lvl->height);
139
140 lvl->rowstride = get_texture_image_row_stride(rmesa, mt->mesaFormat, lvl->width);
141 lvl->size = get_texture_image_size(mt->mesaFormat, lvl->rowstride, lvl->height, lvl->depth, mt->tilebits);
142
143 assert(lvl->size > 0);
144
145 lvl->faces[face].offset = *curOffset;
146 *curOffset += lvl->size;
147
148 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
149 "%s(%p) level %d, face %d: rs:%d %dx%d at %d\n",
150 __func__, rmesa,
151 level, face,
152 lvl->rowstride, lvl->width, height, lvl->faces[face].offset);
153 }
154
155 static GLuint minify(GLuint size, GLuint levels)
156 {
157 size = size >> levels;
158 if (size < 1)
159 size = 1;
160 return size;
161 }
162
163
164 static void calculate_miptree_layout_r100(radeonContextPtr rmesa, radeon_mipmap_tree *mt)
165 {
166 GLuint curOffset, i, face, level;
167
168 assert(mt->numLevels <= rmesa->glCtx->Const.MaxTextureLevels);
169
170 curOffset = 0;
171 for(face = 0; face < mt->faces; face++) {
172
173 for(i = 0, level = mt->baseLevel; i < mt->numLevels; i++, level++) {
174 mt->levels[level].valid = 1;
175 mt->levels[level].width = minify(mt->width0, i);
176 mt->levels[level].height = minify(mt->height0, i);
177 mt->levels[level].depth = minify(mt->depth0, i);
178 compute_tex_image_offset(rmesa, mt, face, level, &curOffset);
179 }
180 }
181
182 /* Note the required size in memory */
183 mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
184
185 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
186 "%s(%p, %p) total size %d\n",
187 __func__, rmesa, mt, mt->totalsize);
188 }
189
190 static void calculate_miptree_layout_r300(radeonContextPtr rmesa, radeon_mipmap_tree *mt)
191 {
192 GLuint curOffset, i, level;
193
194 assert(mt->numLevels <= rmesa->glCtx->Const.MaxTextureLevels);
195
196 curOffset = 0;
197 for(i = 0, level = mt->baseLevel; i < mt->numLevels; i++, level++) {
198 GLuint face;
199
200 mt->levels[level].valid = 1;
201 mt->levels[level].width = minify(mt->width0, i);
202 mt->levels[level].height = minify(mt->height0, i);
203 mt->levels[level].depth = minify(mt->depth0, i);
204
205 for(face = 0; face < mt->faces; face++)
206 compute_tex_image_offset(rmesa, mt, face, level, &curOffset);
207 /* r600 cube levels seems to be aligned to 8 faces but
208 * we have separate register for 1'st level offset so add
209 * 2 image alignment after 1'st mip level */
210 if(rmesa->radeonScreen->chip_family >= CHIP_FAMILY_R600 &&
211 mt->target == GL_TEXTURE_CUBE_MAP && level >= 1)
212 curOffset += 2 * mt->levels[level].size;
213 }
214
215 /* Note the required size in memory */
216 mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
217
218 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
219 "%s(%p, %p) total size %d\n",
220 __func__, rmesa, mt, mt->totalsize);
221 }
222
223 /**
224 * Create a new mipmap tree, calculate its layout and allocate memory.
225 */
226 static radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa,
227 GLenum target, gl_format mesaFormat, GLuint baseLevel, GLuint numLevels,
228 GLuint width0, GLuint height0, GLuint depth0, GLuint tilebits)
229 {
230 radeon_mipmap_tree *mt = CALLOC_STRUCT(_radeon_mipmap_tree);
231
232 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
233 "%s(%p) new tree is %p.\n",
234 __func__, rmesa, mt);
235
236 mt->mesaFormat = mesaFormat;
237 mt->refcount = 1;
238 mt->target = target;
239 mt->faces = (target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
240 mt->baseLevel = baseLevel;
241 mt->numLevels = numLevels;
242 mt->width0 = width0;
243 mt->height0 = height0;
244 mt->depth0 = depth0;
245 mt->tilebits = tilebits;
246
247 if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_R300)
248 calculate_miptree_layout_r300(rmesa, mt);
249 else
250 calculate_miptree_layout_r100(rmesa, mt);
251
252 mt->bo = radeon_bo_open(rmesa->radeonScreen->bom,
253 0, mt->totalsize, 1024,
254 RADEON_GEM_DOMAIN_VRAM,
255 0);
256
257 return mt;
258 }
259
260 void radeon_miptree_reference(radeon_mipmap_tree *mt, radeon_mipmap_tree **ptr)
261 {
262 assert(!*ptr);
263
264 mt->refcount++;
265 assert(mt->refcount > 0);
266
267 *ptr = mt;
268 }
269
270 void radeon_miptree_unreference(radeon_mipmap_tree **ptr)
271 {
272 radeon_mipmap_tree *mt = *ptr;
273 if (!mt)
274 return;
275
276 assert(mt->refcount > 0);
277
278 mt->refcount--;
279 if (!mt->refcount) {
280 radeon_bo_unref(mt->bo);
281 free(mt);
282 }
283
284 *ptr = 0;
285 }
286
/**
 * Calculate min and max LOD for the given texture object.
 * @param[in] tObj texture object whose LOD values to calculate
 * @param[out] pminLod minimal LOD
 * @param[out] pmaxLod maximal LOD
 *
 * On an unrecognized target the outputs are left untouched (early return).
 */
static void calculate_min_max_lod(struct gl_texture_object *tObj,
	unsigned *pminLod, unsigned *pmaxLod)
{
	int minLod, maxLod;
	/* Yes, this looks overly complicated, but it's all needed.
	 */
	switch (tObj->Target) {
	case GL_TEXTURE_1D:
	case GL_TEXTURE_2D:
	case GL_TEXTURE_3D:
	case GL_TEXTURE_CUBE_MAP:
		if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) {
			/* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
			 */
			minLod = maxLod = tObj->BaseLevel;
		} else {
			/* Clamp MinLod/MaxLod (float LOD biases) into the
			 * [BaseLevel, MaxLevel] integer level range; the
			 * ordering of the MIN2/MAX2 clamps below matters.
			 */
			minLod = tObj->BaseLevel + (GLint)(tObj->MinLod);
			minLod = MAX2(minLod, tObj->BaseLevel);
			minLod = MIN2(minLod, tObj->MaxLevel);
			/* +0.5 rounds MaxLod to the nearest level. */
			maxLod = tObj->BaseLevel + (GLint)(tObj->MaxLod + 0.5);
			maxLod = MIN2(maxLod, tObj->MaxLevel);
			/* Cannot exceed the smallest level derivable from the
			 * base image's dimensions. */
			maxLod = MIN2(maxLod, tObj->Image[0][minLod]->MaxLog2 + minLod);
			maxLod = MAX2(maxLod, minLod); /* need at least one level */
		}
		break;
	case GL_TEXTURE_RECTANGLE_NV:
	case GL_TEXTURE_4D_SGIS:
		/* These targets have no mipmaps. */
		minLod = maxLod = 0;
		break;
	default:
		return;
	}

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p) target %s, min %d, max %d.\n",
			__func__, tObj,
			_mesa_lookup_enum_by_nr(tObj->Target),
			minLod, maxLod);

	/* save these values */
	*pminLod = minLod;
	*pmaxLod = maxLod;
}
336
337 /**
338 * Checks whether the given miptree can hold the given texture image at the
339 * given face and level.
340 */
341 GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt,
342 struct gl_texture_image *texImage, GLuint face, GLuint level)
343 {
344 radeon_mipmap_level *lvl;
345
346 if (face >= mt->faces)
347 return GL_FALSE;
348
349 if (texImage->TexFormat != mt->mesaFormat)
350 return GL_FALSE;
351
352 lvl = &mt->levels[level];
353 if (!lvl->valid ||
354 lvl->width != texImage->Width ||
355 lvl->height != texImage->Height ||
356 lvl->depth != texImage->Depth)
357 return GL_FALSE;
358
359 return GL_TRUE;
360 }
361
362 /**
363 * Checks whether the given miptree has the right format to store the given texture object.
364 */
365 static GLboolean radeon_miptree_matches_texture(radeon_mipmap_tree *mt, struct gl_texture_object *texObj)
366 {
367 struct gl_texture_image *firstImage;
368 unsigned numLevels;
369 radeon_mipmap_level *mtBaseLevel;
370
371 if (texObj->BaseLevel < mt->baseLevel)
372 return GL_FALSE;
373
374 mtBaseLevel = &mt->levels[texObj->BaseLevel - mt->baseLevel];
375 firstImage = texObj->Image[0][texObj->BaseLevel];
376 numLevels = MIN2(texObj->MaxLevel - texObj->BaseLevel + 1, firstImage->MaxLog2 + 1);
377
378 if (radeon_is_debug_enabled(RADEON_TEXTURE,RADEON_TRACE)) {
379 fprintf(stderr, "Checking if miptree %p matches texObj %p\n", mt, texObj);
380 fprintf(stderr, "target %d vs %d\n", mt->target, texObj->Target);
381 fprintf(stderr, "format %d vs %d\n", mt->mesaFormat, firstImage->TexFormat);
382 fprintf(stderr, "numLevels %d vs %d\n", mt->numLevels, numLevels);
383 fprintf(stderr, "width0 %d vs %d\n", mtBaseLevel->width, firstImage->Width);
384 fprintf(stderr, "height0 %d vs %d\n", mtBaseLevel->height, firstImage->Height);
385 fprintf(stderr, "depth0 %d vs %d\n", mtBaseLevel->depth, firstImage->Depth);
386 if (mt->target == texObj->Target &&
387 mt->mesaFormat == firstImage->TexFormat &&
388 mt->numLevels >= numLevels &&
389 mtBaseLevel->width == firstImage->Width &&
390 mtBaseLevel->height == firstImage->Height &&
391 mtBaseLevel->depth == firstImage->Depth) {
392 fprintf(stderr, "MATCHED\n");
393 } else {
394 fprintf(stderr, "NOT MATCHED\n");
395 }
396 }
397
398 return (mt->target == texObj->Target &&
399 mt->mesaFormat == firstImage->TexFormat &&
400 mt->numLevels >= numLevels &&
401 mtBaseLevel->width == firstImage->Width &&
402 mtBaseLevel->height == firstImage->Height &&
403 mtBaseLevel->depth == firstImage->Depth);
404 }
405
406 /**
407 * Try to allocate a mipmap tree for the given texture object.
408 * @param[in] rmesa radeon context
409 * @param[in] t radeon texture object
410 */
411 void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t)
412 {
413 struct gl_texture_object *texObj = &t->base;
414 struct gl_texture_image *texImg = texObj->Image[0][texObj->BaseLevel];
415 GLuint numLevels;
416
417 assert(!t->mt);
418
419 if (!texImg) {
420 radeon_warning("%s(%p) No image in given texture object(%p).\n",
421 __func__, rmesa, t);
422 return;
423 }
424
425
426 numLevels = MIN2(texObj->MaxLevel - texObj->BaseLevel + 1, texImg->MaxLog2 + 1);
427
428 t->mt = radeon_miptree_create(rmesa, t->base.Target,
429 texImg->TexFormat, texObj->BaseLevel,
430 numLevels, texImg->Width, texImg->Height,
431 texImg->Depth, t->tile_bits);
432 }
433
434 GLuint
435 radeon_miptree_image_offset(radeon_mipmap_tree *mt,
436 GLuint face, GLuint level)
437 {
438 if (mt->target == GL_TEXTURE_CUBE_MAP_ARB)
439 return (mt->levels[level].faces[face].offset);
440 else
441 return mt->levels[level].faces[0].offset;
442 }
443
/**
 * Ensure that the given image is stored in the given miptree from now on.
 *
 * The image's pixel data is copied into @mt (either from the image's old
 * miptree or from its malloc'd Data buffer), the old storage is released,
 * and the image takes a reference on @mt.
 */
static void migrate_image_to_miptree(radeon_mipmap_tree *mt,
				     radeon_texture_image *image,
				     int face, int level)
{
	radeon_mipmap_level *dstlvl = &mt->levels[level];
	unsigned char *dest;

	/* Destination level must already be laid out with matching dimensions. */
	assert(image->mt != mt);
	assert(dstlvl->valid);
	assert(dstlvl->width == image->base.Width);
	assert(dstlvl->height == image->base.Height);
	assert(dstlvl->depth == image->base.Depth);

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s miptree %p, image %p, face %d, level %d.\n",
			__func__, mt, image, face, level);

	/* Map destination bo for writing; unmapped at the end of the function. */
	radeon_bo_map(mt->bo, GL_TRUE);
	dest = mt->bo->ptr + dstlvl->faces[face].offset;

	if (image->mt) {
		/* Format etc. should match, so we really just need a memcpy().
		 * In fact, that memcpy() could be done by the hardware in many
		 * cases, provided that we have a proper memory manager.
		 */
		assert(mt->mesaFormat == image->base.TexFormat);

		radeon_mipmap_level *srclvl = &image->mt->levels[image->mtlevel];

		/* TODO: bring back these assertions once the FBOs are fixed */
#if 0
		assert(image->mtlevel == level);
		assert(srclvl->size == dstlvl->size);
		assert(srclvl->rowstride == dstlvl->rowstride);
#endif

		/* Map source read-only, copy the whole level, drop the old tree. */
		radeon_bo_map(image->mt->bo, GL_FALSE);

		memcpy(dest,
			image->mt->bo->ptr + srclvl->faces[face].offset,
			dstlvl->size);
		radeon_bo_unmap(image->mt->bo);

		radeon_miptree_unreference(&image->mt);
	} else if (image->base.Data) {
		/* This condition should be removed, it's here to workaround
		 * a segfault when mapping textures during software fallbacks.
		 */
		radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
				"%s Trying to map texture in sowftware fallback.\n",
				__func__);
		const uint32_t srcrowstride = _mesa_format_row_stride(image->base.TexFormat, image->base.Width);
		uint32_t rows = image->base.Height * image->base.Depth;

		if (_mesa_is_format_compressed(image->base.TexFormat)) {
			/* Compressed data is copied in block rows. */
			uint32_t blockWidth, blockHeight;
			_mesa_get_format_block_size(image->base.TexFormat, &blockWidth, &blockHeight);
			rows = (rows + blockHeight - 1) / blockHeight;
		}

		copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride,
			  rows, srcrowstride);

		/* The malloc'd copy is no longer needed; the miptree owns the data now. */
		_mesa_free_texmemory(image->base.Data);
		image->base.Data = 0;
	}

	radeon_bo_unmap(mt->bo);

	/* Point the image at its new storage. */
	radeon_miptree_reference(mt, &image->mt);
	image->mtface = face;
	image->mtlevel = level;
}
520
521 /**
522 * Filter matching miptrees, and select one with the most of data.
523 * @param[in] texObj radeon texture object
524 * @param[in] firstLevel first texture level to check
525 * @param[in] lastLevel last texture level to check
526 */
527 static radeon_mipmap_tree * get_biggest_matching_miptree(radeonTexObj *texObj,
528 unsigned firstLevel,
529 unsigned lastLevel)
530 {
531 const unsigned numLevels = lastLevel - firstLevel + 1;
532 unsigned *mtSizes = calloc(numLevels, sizeof(unsigned));
533 radeon_mipmap_tree **mts = calloc(numLevels, sizeof(radeon_mipmap_tree *));
534 unsigned mtCount = 0;
535 unsigned maxMtIndex = 0;
536 radeon_mipmap_tree *tmp;
537
538 for (unsigned level = firstLevel; level <= lastLevel; ++level) {
539 radeon_texture_image *img = get_radeon_texture_image(texObj->base.Image[0][level]);
540 unsigned found = 0;
541 // TODO: why this hack??
542 if (!img)
543 break;
544
545 if (!img->mt)
546 continue;
547
548 for (int i = 0; i < mtCount; ++i) {
549 if (mts[i] == img->mt) {
550 found = 1;
551 mtSizes[i] += img->mt->levels[img->mtlevel].size;
552 break;
553 }
554 }
555
556 if (!found && radeon_miptree_matches_texture(img->mt, &texObj->base)) {
557 mtSizes[mtCount] = img->mt->levels[img->mtlevel].size;
558 mts[mtCount] = img->mt;
559 mtCount++;
560 }
561 }
562
563 if (mtCount == 0) {
564 return NULL;
565 }
566
567 for (int i = 1; i < mtCount; ++i) {
568 if (mtSizes[i] > mtSizes[maxMtIndex]) {
569 maxMtIndex = i;
570 }
571 }
572
573 tmp = mts[maxMtIndex];
574 free(mtSizes);
575 free(mts);
576
577 return tmp;
578 }
579
/**
 * Validate texture mipmap tree.
 * If individual images are stored in different mipmap trees
 * use the mipmap tree that has the most of the correct data.
 *
 * Returns GL_TRUE when the texture object ends up with a consistent
 * miptree, GL_FALSE when the texture is unusable (border, incomplete).
 */
int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);

	/* Nothing to do if already validated or the app supplied the image directly. */
	if (t->validated || t->image_override) {
		return GL_TRUE;
	}

	/* Hardware cannot handle texture borders. */
	if (texObj->Image[0][texObj->BaseLevel]->Border > 0)
		return GL_FALSE;

	_mesa_test_texobj_completeness(rmesa->glCtx, texObj);
	if (!texObj->_Complete) {
		return GL_FALSE;
	}

	calculate_min_max_lod(&t->base, &t->minLod, &t->maxLod);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: Validating texture %p now, minLod = %d, maxLod = %d\n",
			__FUNCTION__, texObj ,t->minLod, t->maxLod);

	/* Prefer the existing miptree that already holds the most level data. */
	radeon_mipmap_tree *dst_miptree;
	dst_miptree = get_biggest_matching_miptree(t, t->minLod, t->maxLod);

	if (!dst_miptree) {
		radeon_miptree_unreference(&t->mt);
		radeon_try_alloc_miptree(rmesa, t);
		dst_miptree = t->mt;
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: No matching miptree found, allocated new one %p\n",
			__FUNCTION__, t->mt);

	} else {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: Using miptree %p\n", __FUNCTION__, t->mt);
	}

	const unsigned faces = texObj->Target == GL_TEXTURE_CUBE_MAP ? 6 : 1;
	unsigned face, level;
	radeon_texture_image *img;
	/* Validate only the levels that will actually be used during rendering */
	for (face = 0; face < faces; ++face) {
		for (level = t->minLod; level <= t->maxLod; ++level) {
			img = get_radeon_texture_image(texObj->Image[face][level]);

			radeon_print(RADEON_TEXTURE, RADEON_TRACE,
				"Checking image level %d, face %d, mt %p ... ",
				level, face, img->mt);

			if (img->mt != dst_miptree) {
				radeon_print(RADEON_TEXTURE, RADEON_TRACE,
					"MIGRATING\n");

				/* Flush pending rendering that still references the
				 * source buffer before its data is moved. */
				struct radeon_bo *src_bo = (img->mt) ? img->mt->bo : img->bo;
				if (src_bo && radeon_bo_is_referenced_by_cs(src_bo, rmesa->cmdbuf.cs)) {
					radeon_firevertices(rmesa);
				}
				migrate_image_to_miptree(dst_miptree, img, face, level);
			} else
				radeon_print(RADEON_TEXTURE, RADEON_TRACE, "OK\n");
		}
	}

	t->validated = GL_TRUE;

	return GL_TRUE;
}
654
655 uint32_t get_base_teximage_offset(radeonTexObj *texObj)
656 {
657 if (!texObj->mt) {
658 return 0;
659 } else {
660 return radeon_miptree_image_offset(texObj->mt, 0, texObj->minLod);
661 }
662 }