Fix image_matches_texture_obj() MaxLevel check
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_texture.c
1 /*
2 * Copyright (C) 2009 Maciej Cencora.
3 * Copyright (C) 2008 Nicolai Haehnle.
4 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 *
6 * The Weather Channel (TM) funded Tungsten Graphics to develop the
7 * initial release of the Radeon 8500 driver under the XFree86 license.
8 * This notice must be preserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining
11 * a copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sublicense, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial
20 * portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
26 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
27 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
28 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 */
31
32 #include "main/glheader.h"
33 #include "main/imports.h"
34 #include "main/context.h"
35 #include "main/convolve.h"
36 #include "main/enums.h"
37 #include "main/mipmap.h"
38 #include "main/texcompress.h"
39 #include "main/texstore.h"
40 #include "main/teximage.h"
41 #include "main/texobj.h"
42
43 #include "xmlpool.h" /* for symbolic values of enum-type options */
44
45 #include "radeon_common.h"
46
47 #include "radeon_mipmap_tree.h"
48
49
50 void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
51 GLuint numrows, GLuint rowsize)
52 {
53 assert(rowsize <= dststride);
54 assert(rowsize <= srcstride);
55
56 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
57 "%s dst %p, stride %u, src %p, stride %u, "
58 "numrows %u, rowsize %u.\n",
59 __func__, dst, dststride,
60 src, srcstride,
61 numrows, rowsize);
62
63 if (rowsize == srcstride && rowsize == dststride) {
64 memcpy(dst, src, numrows*rowsize);
65 } else {
66 GLuint i;
67 for(i = 0; i < numrows; ++i) {
68 memcpy(dst, src, rowsize);
69 dst += dststride;
70 src += srcstride;
71 }
72 }
73 }
74
75 /* textures */
76 /**
77 * Allocate an empty texture image object.
78 */
79 struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
80 {
81 return CALLOC(sizeof(radeon_texture_image));
82 }
83
84 /**
85 * Free memory associated with this texture image.
86 */
87 void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
88 {
89 radeon_texture_image* image = get_radeon_texture_image(timage);
90
91 if (image->mt) {
92 radeon_miptree_unreference(&image->mt);
93 assert(!image->base.Data);
94 } else {
95 _mesa_free_texture_image_data(ctx, timage);
96 }
97 if (image->bo) {
98 radeon_bo_unref(image->bo);
99 image->bo = NULL;
100 }
101 if (timage->Data) {
102 _mesa_free_texmemory(timage->Data);
103 timage->Data = NULL;
104 }
105 }
106
107 /* Set Data pointer and additional data for mapped texture image */
108 static void teximage_set_map_data(radeon_texture_image *image)
109 {
110 radeon_mipmap_level *lvl;
111
112 if (!image->mt) {
113 radeon_warning("%s(%p) Trying to set map data without miptree.\n",
114 __func__, image);
115
116 return;
117 }
118
119 lvl = &image->mt->levels[image->mtlevel];
120
121 image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
122 image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.TexFormat);
123 }
124
125
126 /**
127 * Map a single texture image for glTexImage and friends.
128 */
129 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
130 {
131 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
132 "%s(img %p), write_enable %s.\n",
133 __func__, image,
134 write_enable ? "true": "false");
135 if (image->mt) {
136 assert(!image->base.Data);
137
138 radeon_bo_map(image->mt->bo, write_enable);
139 teximage_set_map_data(image);
140 }
141 }
142
143
144 void radeon_teximage_unmap(radeon_texture_image *image)
145 {
146 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
147 "%s(img %p)\n",
148 __func__, image);
149 if (image->mt) {
150 assert(image->base.Data);
151
152 image->base.Data = 0;
153 radeon_bo_unmap(image->mt->bo);
154 }
155 }
156
157 static void map_override(GLcontext *ctx, radeonTexObj *t)
158 {
159 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
160
161 radeon_bo_map(t->bo, GL_FALSE);
162
163 img->base.Data = t->bo->ptr;
164 }
165
166 static void unmap_override(GLcontext *ctx, radeonTexObj *t)
167 {
168 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
169
170 radeon_bo_unmap(t->bo);
171
172 img->base.Data = NULL;
173 }
174
175 /**
176 * Map a validated texture for reading during software rendering.
177 */
178 void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
179 {
180 radeonTexObj* t = radeon_tex_obj(texObj);
181 int face, level;
182
183 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
184 "%s(%p, tex %p)\n",
185 __func__, ctx, texObj);
186
187 if (!radeon_validate_texture_miptree(ctx, texObj)) {
188 radeon_error("%s(%p, tex %p) Failed to validate miptree for "
189 "sw fallback.\n",
190 __func__, ctx, texObj);
191 return;
192 }
193
194 if (t->image_override && t->bo) {
195 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
196 "%s(%p, tex %p) Work around for missing miptree in r100.\n",
197 __func__, ctx, texObj);
198
199 map_override(ctx, t);
200 }
201
202 /* for r100 3D sw fallbacks don't have mt */
203 if (!t->mt) {
204 radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
205 __func__, ctx, texObj);
206 return;
207 }
208
209 radeon_bo_map(t->mt->bo, GL_FALSE);
210 for(face = 0; face < t->mt->faces; ++face) {
211 for(level = t->minLod; level <= t->maxLod; ++level)
212 teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
213 }
214 }
215
216 void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
217 {
218 radeonTexObj* t = radeon_tex_obj(texObj);
219 int face, level;
220
221 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
222 "%s(%p, tex %p)\n",
223 __func__, ctx, texObj);
224
225 if (t->image_override && t->bo)
226 unmap_override(ctx, t);
227 /* for r100 3D sw fallbacks don't have mt */
228 if (!t->mt)
229 return;
230
231 for(face = 0; face < t->mt->faces; ++face) {
232 for(level = t->minLod; level <= t->maxLod; ++level)
233 texObj->Image[face][level]->Data = 0;
234 }
235 radeon_bo_unmap(t->mt->bo);
236 }
237
238 /**
239 * Wraps Mesa's implementation to ensure that the base level image is mapped.
240 *
241 * This relies on internal details of _mesa_generate_mipmap, in particular
242 * the fact that the memory for recreated texture images is always freed.
243 */
244 static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
245 struct gl_texture_object *texObj)
246 {
247 radeonTexObj* t = radeon_tex_obj(texObj);
248 GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
249 int i, face;
250
251 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
252 "%s(%p, tex %p) Target type %s.\n",
253 __func__, ctx, texObj,
254 _mesa_lookup_enum_by_nr(target));
255
256 _mesa_generate_mipmap(ctx, target, texObj);
257
258 for (face = 0; face < nr_faces; face++) {
259 for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
260 radeon_texture_image *image;
261
262 image = get_radeon_texture_image(texObj->Image[face][i]);
263
264 if (image == NULL)
265 break;
266
267 image->mtlevel = i;
268 image->mtface = face;
269
270 radeon_miptree_unreference(&image->mt);
271 }
272 }
273
274 }
275
276 void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
277 {
278 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
279 struct radeon_bo *bo;
280 GLuint face = _mesa_tex_target_to_face(target);
281 radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
282 bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;
283
284 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
285 "%s(%p, target %s, tex %p)\n",
286 __func__, ctx, _mesa_lookup_enum_by_nr(target),
287 texObj);
288
289 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
290 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
291 "%s(%p, tex %p) Trying to generate mipmap for texture "
292 "in processing by GPU.\n",
293 __func__, ctx, texObj);
294 radeon_firevertices(rmesa);
295 }
296
297 radeon_teximage_map(baseimage, GL_FALSE);
298 radeon_generate_mipmap(ctx, target, texObj);
299 radeon_teximage_unmap(baseimage);
300 }
301
302
303 /* try to find a format which will only need a memcopy */
304 static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
305 GLenum srcFormat,
306 GLenum srcType, GLboolean fbo)
307 {
308 const GLuint ui = 1;
309 const GLubyte littleEndian = *((const GLubyte *)&ui);
310
311 /* r100 can only do this */
312 if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
313 return _dri_texformat_argb8888;
314
315 if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
316 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
317 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
318 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
319 return MESA_FORMAT_RGBA8888;
320 } else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
321 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
322 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
323 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
324 return MESA_FORMAT_RGBA8888_REV;
325 } else if (IS_R200_CLASS(rmesa->radeonScreen)) {
326 return _dri_texformat_argb8888;
327 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
328 srcType == GL_UNSIGNED_INT_8_8_8_8)) {
329 return MESA_FORMAT_ARGB8888_REV;
330 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
331 srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
332 return MESA_FORMAT_ARGB8888;
333 } else
334 return _dri_texformat_argb8888;
335 }
336
337 gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
338 GLint internalFormat,
339 GLenum format,
340 GLenum type)
341 {
342 return radeonChooseTextureFormat(ctx, internalFormat, format,
343 type, 0);
344 }
345
/**
 * Map a GL internalFormat/format/type triple to the hardware texture
 * format the driver will use.
 *
 * \param ctx             GL context (used to reach the radeon context)
 * \param internalFormat  GL internal format requested by the app
 * \param format          user-data format (only consulted for 8888 picks)
 * \param type            user-data type (only consulted for packed types)
 * \param fbo             true when choosing a renderbuffer-compatible format
 * \return the chosen gl_format, or MESA_FORMAT_NONE for unknown input
 *
 * Selection is steered by the "texture_depth" driconf option:
 * do32bpt prefers 32bpp formats, force16bpt forces 16bpp ones.
 */
gl_format radeonChooseTextureFormat(GLcontext * ctx,
				    GLint internalFormat,
				    GLenum format,
				    GLenum type, GLboolean fbo)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	const GLboolean do32bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s InternalFormat=%s(%d) type=%s format=%s\n",
		__func__,
		_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s do32bpt=%d force16bpt=%d\n",
		__func__, do32bpt, force16bpt);

	switch (internalFormat) {
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		/* Generic RGBA: pick by the user data's packed type. */
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
			    _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			    _dri_texformat_argb4444;
		}

	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		/* Generic RGB: same type-driven choice, no alpha priority. */
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_argb8888 :
			    _dri_texformat_rgb565;
		}

	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		return !force16bpt ?
		    radeonChoose8888TexFormat(rmesa, format, type, fbo) :
		    _dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_argb8888 :
		    _dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
		/* r200: can't use a8 format since interpreting hw I8 as a8 would result
		   in wrong rgb values (same as alpha value instead of 0). */
		if (IS_R200_CLASS(rmesa->radeonScreen))
			return _dri_texformat_al88;
		else
			return _dri_texformat_a8;
	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return MESA_FORMAT_YCBCR;
		else
			return MESA_FORMAT_YCBCR_REV;

	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGB_DXT1;

	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGBA_DXT1;

	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return MESA_FORMAT_RGBA_DXT3;

	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return MESA_FORMAT_RGBA_DXT5;

	case GL_ALPHA16F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT16;
	case GL_ALPHA32F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT32;
	case GL_LUMINANCE16F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT16;
	case GL_LUMINANCE32F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT32;
	case GL_LUMINANCE_ALPHA16F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
	case GL_LUMINANCE_ALPHA32F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
	case GL_INTENSITY16F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT16;
	case GL_INTENSITY32F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT32;
	/* NOTE(review): RGB float formats map to the RGBA float variants;
	 * presumably there is no RGB-only float hw format — confirm. */
	case GL_RGB16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGB32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;
	case GL_RGBA16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGBA32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;

#ifdef RADEON_R300
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
		return MESA_FORMAT_Z16;
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		/* Only RV515 and newer can sample 24-bit depth textures. */
		if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
			return MESA_FORMAT_S8_Z24;
		else
			return MESA_FORMAT_Z16;
#else
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		return MESA_FORMAT_S8_Z24;
#endif

	/* EXT_texture_sRGB */
	case GL_SRGB:
	case GL_SRGB8:
	case GL_SRGB_ALPHA:
	case GL_SRGB8_ALPHA8:
	case GL_COMPRESSED_SRGB:
	case GL_COMPRESSED_SRGB_ALPHA:
		return MESA_FORMAT_SRGBA8;

	case GL_SLUMINANCE:
	case GL_SLUMINANCE8:
	case GL_COMPRESSED_SLUMINANCE:
		return MESA_FORMAT_SL8;

	case GL_SLUMINANCE_ALPHA:
	case GL_SLUMINANCE8_ALPHA8:
	case GL_COMPRESSED_SLUMINANCE_ALPHA:
		return MESA_FORMAT_SLA8;

	case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGB_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGBA_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
		return MESA_FORMAT_SRGBA_DXT3;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
		return MESA_FORMAT_SRGBA_DXT5;

	default:
		_mesa_problem(ctx,
			      "unexpected internalFormat 0x%x in %s",
			      (int)internalFormat, __func__);
		return MESA_FORMAT_NONE;
	}

	return MESA_FORMAT_NONE;	/* never get here */
}
579
580 /** Check if given image is valid within current texture object.
581 */
/** Check if given image is valid within current texture object.
 *
 * Returns non-zero when @texImage's level and dimensions are consistent
 * with the texture's base level image, i.e. the image can live inside
 * the object's miptree.
 */
static int image_matches_texture_obj(struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	unsigned level)
{
	const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];

	/* Without a base image there is nothing to validate against. */
	if (!baseImage)
		return 0;

	/* Check image level against object BaseLevel, but not MaxLevel. MaxLevel is not
	 * the highest level that can be assigned to the miptree.
	 */
	/* NOTE(review): if the miptree level array is sized
	 * [RADEON_MIPTREE_MAX_TEXTURE_LEVELS], the last comparison should
	 * arguably be ">=" — confirm against radeon_mipmap_tree.h. */
	const unsigned maxLevel = texObj->BaseLevel + baseImage->MaxLog2;
	if (level < texObj->BaseLevel || level > maxLevel
		|| level > RADEON_MIPTREE_MAX_TEXTURE_LEVELS)
		return 0;

	/* Each level must be exactly the base size shifted down, clamped to 1. */
	const unsigned levelDiff = level - texObj->BaseLevel;
	const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
	const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
	const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);

	return (texImage->Width == refWidth &&
		texImage->Height == refHeight &&
		texImage->Depth == refDepth);
}
608
609 static void teximage_assign_miptree(radeonContextPtr rmesa,
610 struct gl_texture_object *texObj,
611 struct gl_texture_image *texImage,
612 unsigned face,
613 unsigned level)
614 {
615 radeonTexObj *t = radeon_tex_obj(texObj);
616 radeon_texture_image* image = get_radeon_texture_image(texImage);
617
618 /* check image for dimension and level compatibility with texture */
619 if (!image_matches_texture_obj(texObj, texImage, level))
620 return;
621
622 /* Try using current miptree, or create new if there isn't any */
623 if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
624 radeon_miptree_unreference(&t->mt);
625 radeon_try_alloc_miptree(rmesa, t);
626 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
627 "%s: texObj %p, texImage %p, face %d, level %d, "
628 "texObj miptree doesn't match, allocated new miptree %p\n",
629 __FUNCTION__, texObj, texImage, face, level, t->mt);
630 }
631
632 /* Miptree alocation may have failed,
633 * when there was no image for baselevel specified */
634 if (t->mt) {
635 image->mtface = face;
636 image->mtlevel = level;
637 radeon_miptree_reference(t->mt, &image->mt);
638 } else
639 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
640 "%s Failed to allocate miptree.\n", __func__);
641 }
642
643 static GLuint * allocate_image_offsets(GLcontext *ctx,
644 unsigned alignedWidth,
645 unsigned height,
646 unsigned depth)
647 {
648 int i;
649 GLuint *offsets;
650
651 offsets = malloc(depth * sizeof(GLuint)) ;
652 if (!offsets) {
653 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
654 return NULL;
655 }
656
657 for (i = 0; i < depth; ++i) {
658 offsets[i] = alignedWidth * height * i;
659 }
660
661 return offsets;
662 }
663
664 /**
665 * Update a subregion of the given texture image.
666 */
667 static void radeon_store_teximage(GLcontext* ctx, int dims,
668 GLint xoffset, GLint yoffset, GLint zoffset,
669 GLsizei width, GLsizei height, GLsizei depth,
670 GLsizei imageSize,
671 GLenum format, GLenum type,
672 const GLvoid * pixels,
673 const struct gl_pixelstore_attrib *packing,
674 struct gl_texture_object *texObj,
675 struct gl_texture_image *texImage,
676 int compressed)
677 {
678 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
679 radeonTexObj *t = radeon_tex_obj(texObj);
680 radeon_texture_image* image = get_radeon_texture_image(texImage);
681
682 GLuint dstRowStride;
683 GLuint *dstImageOffsets;
684
685 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
686 "%s(%p, tex %p, image %p) compressed %d\n",
687 __func__, ctx, texObj, texImage, compressed);
688
689 if (image->mt) {
690 dstRowStride = image->mt->levels[image->mtlevel].rowstride;
691 } else if (t->bo) {
692 /* TFP case */
693 dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
694 } else {
695 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
696 }
697
698 assert(dstRowStride);
699
700 if (dims == 3) {
701 unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
702 dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
703 if (!dstImageOffsets) {
704 radeon_warning("%s Failed to allocate dstImaeOffset.\n", __func__);
705 return;
706 }
707 } else {
708 dstImageOffsets = texImage->ImageOffsets;
709 }
710
711 radeon_teximage_map(image, GL_TRUE);
712
713 if (compressed) {
714 uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
715 GLubyte *img_start;
716
717 _mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);
718
719 if (!image->mt) {
720 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
721 img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
722 texImage->TexFormat,
723 texImage->Width, texImage->Data);
724 }
725 else {
726 uint32_t offset;
727 offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
728 offset *= _mesa_get_format_bytes(texImage->TexFormat);
729 img_start = texImage->Data + offset;
730 }
731 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
732 bytesPerRow = srcRowStride;
733 rows = (height + block_height - 1) / block_height;
734
735 copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
736 }
737 else {
738 if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
739 texImage->TexFormat, texImage->Data,
740 xoffset, yoffset, zoffset,
741 dstRowStride,
742 dstImageOffsets,
743 width, height, depth,
744 format, type, pixels, packing)) {
745 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
746 }
747 }
748
749 if (dims == 3) {
750 free(dstImageOffsets);
751 }
752
753 radeon_teximage_unmap(image);
754 }
755
756 /**
757 * All glTexImage calls go through this function.
758 */
/**
 * All glTexImage calls go through this function.
 *
 * Flow: flush GPU work referencing the image's BO, free any previous
 * storage, assign a miptree (or fall back to local memory), validate the
 * PBO source, then upload via radeon_store_teximage().
 *
 * \param dims        1, 2 or 3 (glTexImage1D/2D/3D)
 * \param compressed  non-zero for the glCompressedTexImage path
 */
static void radeon_teximage(
	GLcontext *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLint postConvWidth = width;
	GLint postConvHeight = height;
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage, face, level);
	{
		/* Don't overwrite storage the GPU may still be reading. */
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling teximage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	/* Force re-validation of the miptree on next draw. */
	t->validated = GL_FALSE;

	if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
		_mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
						   &postConvHeight);
	}

	if (!_mesa_is_format_compressed(texImage->TexFormat)) {
		GLuint texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
		/* Minimum pitch of 32 bytes */
		if (postConvWidth * texelBytes < 32) {
			postConvWidth = 32 / texelBytes;
			texImage->RowStride = postConvWidth;
		}
		if (!image->mt) {
			assert(texImage->RowStride == postConvWidth);
		}
	}

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		/* Fall back to plain malloc'ed memory when no miptree could
		 * be assigned. */
		if (!image->mt) {
			int size = _mesa_format_image_size(texImage->TexFormat,
								texImage->Width,
								texImage->Height,
								texImage->Depth);
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
					"%s %dd: texObj %p, texImage %p, "
					" no miptree assigned, using local memory %p\n",
					__func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			0, 0, 0,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
853
854 void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
855 GLint internalFormat,
856 GLint width, GLint border,
857 GLenum format, GLenum type, const GLvoid * pixels,
858 const struct gl_pixelstore_attrib *packing,
859 struct gl_texture_object *texObj,
860 struct gl_texture_image *texImage)
861 {
862 radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
863 0, format, type, pixels, packing, texObj, texImage, 0);
864 }
865
866 void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
867 GLint internalFormat,
868 GLint width, GLint height, GLint border,
869 GLenum format, GLenum type, const GLvoid * pixels,
870 const struct gl_pixelstore_attrib *packing,
871 struct gl_texture_object *texObj,
872 struct gl_texture_image *texImage)
873
874 {
875 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
876 0, format, type, pixels, packing, texObj, texImage, 0);
877 }
878
879 void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
880 GLint level, GLint internalFormat,
881 GLint width, GLint height, GLint border,
882 GLsizei imageSize, const GLvoid * data,
883 struct gl_texture_object *texObj,
884 struct gl_texture_image *texImage)
885 {
886 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
887 imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
888 }
889
890 void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
891 GLint internalFormat,
892 GLint width, GLint height, GLint depth,
893 GLint border,
894 GLenum format, GLenum type, const GLvoid * pixels,
895 const struct gl_pixelstore_attrib *packing,
896 struct gl_texture_object *texObj,
897 struct gl_texture_image *texImage)
898 {
899 radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
900 0, format, type, pixels, packing, texObj, texImage, 0);
901 }
902
903 /**
904 * All glTexSubImage calls go through this function.
905 */
/**
 * All glTexSubImage calls go through this function.
 *
 * Unlike radeon_teximage(), storage already exists: flush GPU work
 * referencing the image's BO, validate the PBO source, and write the
 * subregion via radeon_store_teximage().
 *
 * \param dims        1, 2 or 3 (glTexSubImage1D/2D/3D)
 * \param compressed  non-zero for the glCompressedTexSubImage path
 */
static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage,
			_mesa_tex_target_to_face(target), level);
	{
		/* Don't overwrite storage the GPU may still be reading. */
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling texsubimage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	/* Force re-validation of the miptree on next draw. */
	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			xoffset, yoffset, zoffset,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
959
960 void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
961 GLint xoffset,
962 GLsizei width,
963 GLenum format, GLenum type,
964 const GLvoid * pixels,
965 const struct gl_pixelstore_attrib *packing,
966 struct gl_texture_object *texObj,
967 struct gl_texture_image *texImage)
968 {
969 radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
970 format, type, pixels, packing, texObj, texImage, 0);
971 }
972
973 void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
974 GLint xoffset, GLint yoffset,
975 GLsizei width, GLsizei height,
976 GLenum format, GLenum type,
977 const GLvoid * pixels,
978 const struct gl_pixelstore_attrib *packing,
979 struct gl_texture_object *texObj,
980 struct gl_texture_image *texImage)
981 {
982 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
983 0, format, type, pixels, packing, texObj, texImage,
984 0);
985 }
986
987 void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
988 GLint level, GLint xoffset,
989 GLint yoffset, GLsizei width,
990 GLsizei height, GLenum format,
991 GLsizei imageSize, const GLvoid * data,
992 struct gl_texture_object *texObj,
993 struct gl_texture_image *texImage)
994 {
995 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
996 imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
997 }
998
999
1000 void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
1001 GLint xoffset, GLint yoffset, GLint zoffset,
1002 GLsizei width, GLsizei height, GLsizei depth,
1003 GLenum format, GLenum type,
1004 const GLvoid * pixels,
1005 const struct gl_pixelstore_attrib *packing,
1006 struct gl_texture_object *texObj,
1007 struct gl_texture_image *texImage)
1008 {
1009 radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
1010 format, type, pixels, packing, texObj, texImage, 0);
1011 }
1012
1013 unsigned radeonIsFormatRenderable(gl_format mesa_format)
1014 {
1015 if (mesa_format == _dri_texformat_argb8888 || mesa_format == _dri_texformat_rgb565 ||
1016 mesa_format == _dri_texformat_argb1555 || mesa_format == _dri_texformat_argb4444)
1017 return 1;
1018
1019 switch (mesa_format)
1020 {
1021 case MESA_FORMAT_Z16:
1022 case MESA_FORMAT_S8_Z24:
1023 return 1;
1024 default:
1025 return 0;
1026 }
1027 }