[mesa.git] / src / mesa / drivers / dri / radeon / radeon_texture.c
1 /*
2 * Copyright (C) 2009 Maciej Cencora.
3 * Copyright (C) 2008 Nicolai Haehnle.
4 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 *
6 * The Weather Channel (TM) funded Tungsten Graphics to develop the
7 * initial release of the Radeon 8500 driver under the XFree86 license.
8 * This notice must be preserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining
11 * a copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sublicense, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial
20 * portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
26 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
27 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
28 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 */
31
32 #include "main/glheader.h"
33 #include "main/imports.h"
34 #include "main/context.h"
35 #include "main/convolve.h"
36 #include "main/enums.h"
37 #include "main/mipmap.h"
38 #include "main/texcompress.h"
39 #include "main/texstore.h"
40 #include "main/teximage.h"
41 #include "main/texobj.h"
42 #include "drivers/common/meta.h"
43
44 #include "xmlpool.h" /* for symbolic values of enum-type options */
45
46 #include "radeon_common.h"
47
48 #include "radeon_mipmap_tree.h"
49
50
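/**
 * Copy a block of pixel rows between two buffers with (possibly) different
 * strides.  When both strides equal the row size the copy collapses into a
 * single memcpy; otherwise rows are copied one at a time.
 */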
51 void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
52 GLuint numrows, GLuint rowsize)
53 {
54 assert(rowsize <= dststride);
55 assert(rowsize <= srcstride);
56
57 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
58 "%s dst %p, stride %u, src %p, stride %u, "
59 "numrows %u, rowsize %u.\n",
60 __func__, dst, dststride,
61 src, srcstride,
62 numrows, rowsize);
63
64 if (rowsize == srcstride && rowsize == dststride) {
65 memcpy(dst, src, numrows*rowsize);
66 } else {
67 GLuint i;
68 for(i = 0; i < numrows; ++i) {
69 memcpy(dst, src, rowsize);
 70 			dst = (GLubyte *) dst + dststride;
 71 			src = (const GLubyte *) src + srcstride;
72 }
73 }
74 }
75
76 /* textures */
77 /**
78 * Allocate an empty texture image object.
79 */
80 struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
81 {
82 return CALLOC(sizeof(radeon_texture_image));
83 }
84
85 /**
86 * Free memory associated with this texture image.
87 */
88 void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
89 {
90 radeon_texture_image* image = get_radeon_texture_image(timage);
91
92 if (image->mt) {
93 radeon_miptree_unreference(&image->mt);
94 assert(!image->base.Data);
95 } else {
96 _mesa_free_texture_image_data(ctx, timage);
97 }
98 if (image->bo) {
99 radeon_bo_unref(image->bo);
100 image->bo = NULL;
101 }
102 if (timage->Data) {
103 _mesa_free_texmemory(timage->Data);
104 timage->Data = NULL;
105 }
106 }
107
108 /* Set Data pointer and additional data for mapped texture image */
109 static void teximage_set_map_data(radeon_texture_image *image)
110 {
111 radeon_mipmap_level *lvl;
112
113 if (!image->mt) {
114 radeon_warning("%s(%p) Trying to set map data without miptree.\n",
115 __func__, image);
116
117 return;
118 }
119
120 lvl = &image->mt->levels[image->mtlevel];
121
122 	image->base.Data = (GLubyte *) image->mt->bo->ptr + lvl->faces[image->mtface].offset;
123 image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.TexFormat);
124 }
125
126
127 /**
128 * Map a single texture image for glTexImage and friends.
129 */
130 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
131 {
132 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
133 "%s(img %p), write_enable %s.\n",
134 __func__, image,
135 write_enable ? "true": "false");
136 if (image->mt) {
137 assert(!image->base.Data);
138
139 radeon_bo_map(image->mt->bo, write_enable);
140 teximage_set_map_data(image);
141 }
142 }
143
144
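/**
 * Unmap a texture image previously mapped with radeon_teximage_map().
 */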
145 void radeon_teximage_unmap(radeon_texture_image *image)
146 {
147 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
148 "%s(img %p)\n",
149 __func__, image);
150 if (image->mt) {
151 assert(image->base.Data);
152
153 image->base.Data = 0;
154 radeon_bo_unmap(image->mt->bo);
155 }
156 }
157
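/* Map/unmap helpers for textures whose storage is an externally provided BO
 * with no miptree (image_override), e.g. the r100 texture-from-pixmap path:
 * the whole BO is exposed through the base image's Data pointer. */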
158 static void map_override(GLcontext *ctx, radeonTexObj *t)
159 {
160 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
161
162 radeon_bo_map(t->bo, GL_FALSE);
163
164 img->base.Data = t->bo->ptr;
165 }
166
167 static void unmap_override(GLcontext *ctx, radeonTexObj *t)
168 {
169 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
170
171 radeon_bo_unmap(t->bo);
172
173 img->base.Data = NULL;
174 }
175
176 /**
177 * Map a validated texture for reading during software rendering.
178 */
179 void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
180 {
181 radeonTexObj* t = radeon_tex_obj(texObj);
182 int face, level;
183
184 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
185 "%s(%p, tex %p)\n",
186 __func__, ctx, texObj);
187
188 if (!radeon_validate_texture_miptree(ctx, texObj)) {
189 radeon_error("%s(%p, tex %p) Failed to validate miptree for "
190 "sw fallback.\n",
191 __func__, ctx, texObj);
192 return;
193 }
194
195 if (t->image_override && t->bo) {
196 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
197 				"%s(%p, tex %p) Workaround for missing miptree on r100.\n",
198 __func__, ctx, texObj);
199
200 map_override(ctx, t);
201 }
202
203 	/* r100 3D sw fallbacks don't have a miptree */
204 if (!t->mt) {
205 radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
206 __func__, ctx, texObj);
207 return;
208 }
209
210 radeon_bo_map(t->mt->bo, GL_FALSE);
211 for(face = 0; face < t->mt->faces; ++face) {
212 for(level = t->minLod; level <= t->maxLod; ++level)
213 teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
214 }
215 }
216
217 void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
218 {
219 radeonTexObj* t = radeon_tex_obj(texObj);
220 int face, level;
221
222 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
223 "%s(%p, tex %p)\n",
224 __func__, ctx, texObj);
225
226 if (t->image_override && t->bo)
227 unmap_override(ctx, t);
228 	/* r100 3D sw fallbacks don't have a miptree */
229 if (!t->mt)
230 return;
231
232 for(face = 0; face < t->mt->faces; ++face) {
233 for(level = t->minLod; level <= t->maxLod; ++level)
234 texObj->Image[face][level]->Data = 0;
235 }
236 radeon_bo_unmap(t->mt->bo);
237 }
238
239 /**
240 * Wraps Mesa's implementation to ensure that the base level image is mapped.
241 *
242 * This relies on internal details of _mesa_generate_mipmap, in particular
243 * the fact that the memory for recreated texture images is always freed.
244 */
245 static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
246 struct gl_texture_object *texObj)
247 {
248 radeonTexObj* t = radeon_tex_obj(texObj);
249 GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
250 int i, face;
251
252 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
253 "%s(%p, tex %p) Target type %s.\n",
254 __func__, ctx, texObj,
255 _mesa_lookup_enum_by_nr(target));
256
257 _mesa_generate_mipmap(ctx, target, texObj);
258
259 for (face = 0; face < nr_faces; face++) {
260 for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
261 radeon_texture_image *image;
262
263 image = get_radeon_texture_image(texObj->Image[face][i]);
264
265 if (image == NULL)
266 break;
267
268 image->mtlevel = i;
269 image->mtface = face;
270
271 radeon_miptree_unreference(&image->mt);
272 }
273 }
274
275 }
276
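/**
 * Mipmap generation entry point (typically installed as the driver's
 * GenerateMipmap hook): flush any rendering that still references the
 * texture's BO, then either run the software fallback with the base image
 * mapped, or use the accelerated meta path.
 */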
277 void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
278 {
279 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
280 struct radeon_bo *bo;
281 GLuint face = _mesa_tex_target_to_face(target);
282 radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
283 bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;
284
285 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
286 "%s(%p, target %s, tex %p)\n",
287 __func__, ctx, _mesa_lookup_enum_by_nr(target),
288 texObj);
289
290 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
291 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
292 			 "%s(%p, tex %p) Trying to generate mipmaps for a texture "
293 			 "that is queued for GPU processing.\n",
294 __func__, ctx, texObj);
295 radeon_firevertices(rmesa);
296 }
297
298 if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
299 radeon_teximage_map(baseimage, GL_FALSE);
300 radeon_generate_mipmap(ctx, target, texObj);
301 radeon_teximage_unmap(baseimage);
302 } else {
303 _mesa_meta_GenerateMipmap(ctx, target, texObj);
304 }
305 }
306
307
308 /* Try to find a hardware format that needs only a straight memcpy on upload. */
309 static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
310 GLenum srcFormat,
311 GLenum srcType, GLboolean fbo)
312 {
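	/* Runtime endianness probe: the first byte of a GLuint holding 1 is
	 * non-zero only on a little-endian host. */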
313 const GLuint ui = 1;
314 const GLubyte littleEndian = *((const GLubyte *)&ui);
315
316 /* r100 can only do this */
317 if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
318 return _dri_texformat_argb8888;
319
320 if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
321 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
322 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
323 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
324 return MESA_FORMAT_RGBA8888;
325 } else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
326 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
327 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
328 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
329 return MESA_FORMAT_RGBA8888_REV;
330 } else if (IS_R200_CLASS(rmesa->radeonScreen)) {
331 return _dri_texformat_argb8888;
332 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
333 srcType == GL_UNSIGNED_INT_8_8_8_8)) {
334 return MESA_FORMAT_ARGB8888_REV;
335 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
336 srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
337 return MESA_FORMAT_ARGB8888;
338 } else
339 return _dri_texformat_argb8888;
340 }
341
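/**
 * Wrapper matching the core ChooseTextureFormat driver hook signature;
 * simply forwards to radeonChooseTextureFormat() with fbo = 0.
 */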
342 gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
343 GLint internalFormat,
344 GLenum format,
345 GLenum type)
346 {
347 return radeonChooseTextureFormat(ctx, internalFormat, format,
348 type, 0);
349 }
350
351 gl_format radeonChooseTextureFormat(GLcontext * ctx,
352 GLint internalFormat,
353 GLenum format,
354 GLenum type, GLboolean fbo)
355 {
356 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
357 const GLboolean do32bpt =
358 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
359 const GLboolean force16bpt =
360 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
361 (void)format;
362
363 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
364 "%s InternalFormat=%s(%d) type=%s format=%s\n",
365 __func__,
366 _mesa_lookup_enum_by_nr(internalFormat), internalFormat,
367 _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
368 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
369 "%s do32bpt=%d force16bpt=%d\n",
370 __func__, do32bpt, force16bpt);
371
372 switch (internalFormat) {
373 case 4:
374 case GL_RGBA:
375 case GL_COMPRESSED_RGBA:
376 switch (type) {
377 case GL_UNSIGNED_INT_10_10_10_2:
378 case GL_UNSIGNED_INT_2_10_10_10_REV:
379 return do32bpt ? _dri_texformat_argb8888 :
380 _dri_texformat_argb1555;
381 case GL_UNSIGNED_SHORT_4_4_4_4:
382 case GL_UNSIGNED_SHORT_4_4_4_4_REV:
383 return _dri_texformat_argb4444;
384 case GL_UNSIGNED_SHORT_5_5_5_1:
385 case GL_UNSIGNED_SHORT_1_5_5_5_REV:
386 return _dri_texformat_argb1555;
387 default:
388 return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
389 _dri_texformat_argb4444;
390 }
391
392 case 3:
393 case GL_RGB:
394 case GL_COMPRESSED_RGB:
395 switch (type) {
396 case GL_UNSIGNED_SHORT_4_4_4_4:
397 case GL_UNSIGNED_SHORT_4_4_4_4_REV:
398 return _dri_texformat_argb4444;
399 case GL_UNSIGNED_SHORT_5_5_5_1:
400 case GL_UNSIGNED_SHORT_1_5_5_5_REV:
401 return _dri_texformat_argb1555;
402 case GL_UNSIGNED_SHORT_5_6_5:
403 case GL_UNSIGNED_SHORT_5_6_5_REV:
404 return _dri_texformat_rgb565;
405 default:
406 return do32bpt ? _dri_texformat_argb8888 :
407 _dri_texformat_rgb565;
408 }
409
410 case GL_RGBA8:
411 case GL_RGB10_A2:
412 case GL_RGBA12:
413 case GL_RGBA16:
414 return !force16bpt ?
415 radeonChoose8888TexFormat(rmesa, format, type, fbo) :
416 _dri_texformat_argb4444;
417
418 case GL_RGBA4:
419 case GL_RGBA2:
420 return _dri_texformat_argb4444;
421
422 case GL_RGB5_A1:
423 return _dri_texformat_argb1555;
424
425 case GL_RGB8:
426 case GL_RGB10:
427 case GL_RGB12:
428 case GL_RGB16:
429 return !force16bpt ? _dri_texformat_argb8888 :
430 _dri_texformat_rgb565;
431
432 case GL_RGB5:
433 case GL_RGB4:
434 case GL_R3_G3_B2:
435 return _dri_texformat_rgb565;
436
437 case GL_ALPHA:
438 case GL_ALPHA4:
439 case GL_ALPHA8:
440 case GL_ALPHA12:
441 case GL_ALPHA16:
442 case GL_COMPRESSED_ALPHA:
443 		/* r200: can't use an A8 format, since the hw would interpret I8 as A8 and
444 		   the RGB values would come out wrong (equal to the alpha value instead of 0). */
445 if (IS_R200_CLASS(rmesa->radeonScreen))
446 return _dri_texformat_al88;
447 else
448 return _dri_texformat_a8;
449 case 1:
450 case GL_LUMINANCE:
451 case GL_LUMINANCE4:
452 case GL_LUMINANCE8:
453 case GL_LUMINANCE12:
454 case GL_LUMINANCE16:
455 case GL_COMPRESSED_LUMINANCE:
456 return _dri_texformat_l8;
457
458 case 2:
459 case GL_LUMINANCE_ALPHA:
460 case GL_LUMINANCE4_ALPHA4:
461 case GL_LUMINANCE6_ALPHA2:
462 case GL_LUMINANCE8_ALPHA8:
463 case GL_LUMINANCE12_ALPHA4:
464 case GL_LUMINANCE12_ALPHA12:
465 case GL_LUMINANCE16_ALPHA16:
466 case GL_COMPRESSED_LUMINANCE_ALPHA:
467 return _dri_texformat_al88;
468
469 case GL_INTENSITY:
470 case GL_INTENSITY4:
471 case GL_INTENSITY8:
472 case GL_INTENSITY12:
473 case GL_INTENSITY16:
474 case GL_COMPRESSED_INTENSITY:
475 return _dri_texformat_i8;
476
477 case GL_YCBCR_MESA:
478 if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
479 type == GL_UNSIGNED_BYTE)
480 return MESA_FORMAT_YCBCR;
481 else
482 return MESA_FORMAT_YCBCR_REV;
483
484 case GL_RGB_S3TC:
485 case GL_RGB4_S3TC:
486 case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
487 return MESA_FORMAT_RGB_DXT1;
488
489 case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
490 return MESA_FORMAT_RGBA_DXT1;
491
492 case GL_RGBA_S3TC:
493 case GL_RGBA4_S3TC:
494 case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
495 return MESA_FORMAT_RGBA_DXT3;
496
497 case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
498 return MESA_FORMAT_RGBA_DXT5;
499
500 case GL_ALPHA16F_ARB:
501 return MESA_FORMAT_ALPHA_FLOAT16;
502 case GL_ALPHA32F_ARB:
503 return MESA_FORMAT_ALPHA_FLOAT32;
504 case GL_LUMINANCE16F_ARB:
505 return MESA_FORMAT_LUMINANCE_FLOAT16;
506 case GL_LUMINANCE32F_ARB:
507 return MESA_FORMAT_LUMINANCE_FLOAT32;
508 case GL_LUMINANCE_ALPHA16F_ARB:
509 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
510 case GL_LUMINANCE_ALPHA32F_ARB:
511 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
512 case GL_INTENSITY16F_ARB:
513 return MESA_FORMAT_INTENSITY_FLOAT16;
514 case GL_INTENSITY32F_ARB:
515 return MESA_FORMAT_INTENSITY_FLOAT32;
516 case GL_RGB16F_ARB:
517 return MESA_FORMAT_RGBA_FLOAT16;
518 case GL_RGB32F_ARB:
519 return MESA_FORMAT_RGBA_FLOAT32;
520 case GL_RGBA16F_ARB:
521 return MESA_FORMAT_RGBA_FLOAT16;
522 case GL_RGBA32F_ARB:
523 return MESA_FORMAT_RGBA_FLOAT32;
524
525 #ifdef RADEON_R300
526 case GL_DEPTH_COMPONENT:
527 case GL_DEPTH_COMPONENT16:
528 return MESA_FORMAT_Z16;
529 case GL_DEPTH_COMPONENT24:
530 case GL_DEPTH_COMPONENT32:
531 case GL_DEPTH_STENCIL_EXT:
532 case GL_DEPTH24_STENCIL8_EXT:
533 if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
534 return MESA_FORMAT_S8_Z24;
535 else
536 return MESA_FORMAT_Z16;
537 #else
538 case GL_DEPTH_COMPONENT:
539 case GL_DEPTH_COMPONENT16:
540 case GL_DEPTH_COMPONENT24:
541 case GL_DEPTH_COMPONENT32:
542 case GL_DEPTH_STENCIL_EXT:
543 case GL_DEPTH24_STENCIL8_EXT:
544 return MESA_FORMAT_S8_Z24;
545 #endif
546
547 /* EXT_texture_sRGB */
548 case GL_SRGB:
549 case GL_SRGB8:
550 case GL_SRGB_ALPHA:
551 case GL_SRGB8_ALPHA8:
552 case GL_COMPRESSED_SRGB:
553 case GL_COMPRESSED_SRGB_ALPHA:
554 return MESA_FORMAT_SRGBA8;
555
556 case GL_SLUMINANCE:
557 case GL_SLUMINANCE8:
558 case GL_COMPRESSED_SLUMINANCE:
559 return MESA_FORMAT_SL8;
560
561 case GL_SLUMINANCE_ALPHA:
562 case GL_SLUMINANCE8_ALPHA8:
563 case GL_COMPRESSED_SLUMINANCE_ALPHA:
564 return MESA_FORMAT_SLA8;
565
566 case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
567 return MESA_FORMAT_SRGB_DXT1;
568 case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
569 return MESA_FORMAT_SRGBA_DXT1;
570 case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
571 return MESA_FORMAT_SRGBA_DXT3;
572 case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
573 return MESA_FORMAT_SRGBA_DXT5;
574
575 default:
576 _mesa_problem(ctx,
577 "unexpected internalFormat 0x%x in %s",
578 (int)internalFormat, __func__);
579 return MESA_FORMAT_NONE;
580 }
581
582 return MESA_FORMAT_NONE; /* never get here */
583 }
584
585 /** Check if given image is valid within current texture object.
586 */
587 static int image_matches_texture_obj(struct gl_texture_object *texObj,
588 struct gl_texture_image *texImage,
589 unsigned level)
590 {
591 const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];
592
593 if (!baseImage)
594 return 0;
595
596 	/* Check the image level against the object's BaseLevel, but not against MaxLevel:
597 	 * MaxLevel does not bound the levels that can be stored in the miptree.
598 */
599 const unsigned maxLevel = texObj->BaseLevel + baseImage->MaxLog2;
600 if (level < texObj->BaseLevel || level > maxLevel
601 || level > RADEON_MIPTREE_MAX_TEXTURE_LEVELS)
602 return 0;
603
604 const unsigned levelDiff = level - texObj->BaseLevel;
605 const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
606 const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
607 const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);
608
609 return (texImage->Width == refWidth &&
610 texImage->Height == refHeight &&
611 texImage->Depth == refDepth);
612 }
613
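/**
 * Attach the texture image to the texture object's miptree, replacing the
 * miptree with a freshly allocated one if the current one does not match.
 * If allocation fails the image is left without a miptree and the caller
 * falls back to ordinary (non-miptree) texture memory.
 */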
614 static void teximage_assign_miptree(radeonContextPtr rmesa,
615 struct gl_texture_object *texObj,
616 struct gl_texture_image *texImage,
617 unsigned face,
618 unsigned level)
619 {
620 radeonTexObj *t = radeon_tex_obj(texObj);
621 radeon_texture_image* image = get_radeon_texture_image(texImage);
622
623 /* check image for dimension and level compatibility with texture */
624 if (!image_matches_texture_obj(texObj, texImage, level))
625 return;
626
627 	/* Try using the current miptree; create a new one if there isn't any or it doesn't match the image */
628 if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
629 radeon_miptree_unreference(&t->mt);
630 radeon_try_alloc_miptree(rmesa, t);
631 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
632 "%s: texObj %p, texImage %p, face %d, level %d, "
633 "texObj miptree doesn't match, allocated new miptree %p\n",
634 				__func__, texObj, texImage, face, level, t->mt);
635 }
636
637 	/* Miptree allocation may have failed,
638 	 * e.g. when no image was specified for the base level */
639 if (t->mt) {
640 image->mtface = face;
641 image->mtlevel = level;
642 radeon_miptree_reference(t->mt, &image->mt);
643 } else
644 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
645 "%s Failed to allocate miptree.\n", __func__);
646 }
647
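/* Build a per-slice offset table (in texels) for storing a 3D image:
 * slice i starts at alignedWidth * height * i.  The caller frees the
 * table with free(). */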
648 static GLuint * allocate_image_offsets(GLcontext *ctx,
649 unsigned alignedWidth,
650 unsigned height,
651 unsigned depth)
652 {
653 int i;
654 GLuint *offsets;
655
656 	offsets = malloc(depth * sizeof(GLuint));
657 if (!offsets) {
658 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
659 return NULL;
660 }
661
662 for (i = 0; i < depth; ++i) {
663 offsets[i] = alignedWidth * height * i;
664 }
665
666 return offsets;
667 }
668
669 /**
670 * Update a subregion of the given texture image.
671 */
672 static void radeon_store_teximage(GLcontext* ctx, int dims,
673 GLint xoffset, GLint yoffset, GLint zoffset,
674 GLsizei width, GLsizei height, GLsizei depth,
675 GLsizei imageSize,
676 GLenum format, GLenum type,
677 const GLvoid * pixels,
678 const struct gl_pixelstore_attrib *packing,
679 struct gl_texture_object *texObj,
680 struct gl_texture_image *texImage,
681 int compressed)
682 {
683 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
684 radeonTexObj *t = radeon_tex_obj(texObj);
685 radeon_texture_image* image = get_radeon_texture_image(texImage);
686
687 GLuint dstRowStride;
688 GLuint *dstImageOffsets;
689
690 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
691 "%s(%p, tex %p, image %p) compressed %d\n",
692 __func__, ctx, texObj, texImage, compressed);
693
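	/* Pick the destination row stride: the miptree level's stride when the
	 * image lives in a miptree, the override BO's stride for the TFP case,
	 * or a tightly packed stride for plain (malloc'ed) storage. */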
694 if (image->mt) {
695 dstRowStride = image->mt->levels[image->mtlevel].rowstride;
696 } else if (t->bo) {
697 		/* TFP (texture from pixmap) case */
698 dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
699 } else {
700 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
701 }
702
703 assert(dstRowStride);
704
705 if (dims == 3) {
706 unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
707 dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
708 if (!dstImageOffsets) {
709 			radeon_warning("%s Failed to allocate dstImageOffsets.\n", __func__);
710 return;
711 }
712 } else {
713 dstImageOffsets = texImage->ImageOffsets;
714 }
715
716 radeon_teximage_map(image, GL_TRUE);
717
718 if (compressed) {
719 uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
720 GLubyte *img_start;
721
722 _mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);
723
724 if (!image->mt) {
725 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
726 img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
727 texImage->TexFormat,
728 texImage->Width, texImage->Data);
729 }
730 else {
731 uint32_t offset;
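			/* Convert (xoffset, yoffset) into the byte offset of the first
			 * destination block: dstRowStride / block-bytes gives blocks per
			 * row (for compressed formats _mesa_get_format_bytes returns the
			 * size of one block), which is scaled by the block row and added
			 * to the block column before converting back to bytes. */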
732 offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
733 offset *= _mesa_get_format_bytes(texImage->TexFormat);
734 img_start = texImage->Data + offset;
735 }
736 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
737 bytesPerRow = srcRowStride;
738 rows = (height + block_height - 1) / block_height;
739
740 copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
741 }
742 else {
743 if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
744 texImage->TexFormat, texImage->Data,
745 xoffset, yoffset, zoffset,
746 dstRowStride,
747 dstImageOffsets,
748 width, height, depth,
749 format, type, pixels, packing)) {
750 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
751 }
752 }
753
754 if (dims == 3) {
755 free(dstImageOffsets);
756 }
757
758 radeon_teximage_unmap(image);
759 }
760
761 /**
762 * All glTexImage calls go through this function.
763 */
764 static void radeon_teximage(
765 GLcontext *ctx, int dims,
766 GLenum target, GLint level,
767 GLint internalFormat,
768 GLint width, GLint height, GLint depth,
769 GLsizei imageSize,
770 GLenum format, GLenum type, const GLvoid * pixels,
771 const struct gl_pixelstore_attrib *packing,
772 struct gl_texture_object *texObj,
773 struct gl_texture_image *texImage,
774 int compressed)
775 {
776 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
777 radeonTexObj* t = radeon_tex_obj(texObj);
778 radeon_texture_image* image = get_radeon_texture_image(texImage);
779 GLint postConvWidth = width;
780 GLint postConvHeight = height;
781 GLuint face = _mesa_tex_target_to_face(target);
782
783 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
784 "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
785 __func__, dims, texObj, texImage, face, level);
786 {
787 struct radeon_bo *bo;
788 bo = !image->mt ? image->bo : image->mt->bo;
789 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
790 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
791 "%s Calling teximage for texture that is "
792 "queued for GPU processing.\n",
793 __func__);
794 radeon_firevertices(rmesa);
795 }
796 }
797
798
799 t->validated = GL_FALSE;
800
801 if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
802 _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
803 &postConvHeight);
804 }
805
806 if (!_mesa_is_format_compressed(texImage->TexFormat)) {
807 GLuint texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
808 /* Minimum pitch of 32 bytes */
809 if (postConvWidth * texelBytes < 32) {
810 postConvWidth = 32 / texelBytes;
811 texImage->RowStride = postConvWidth;
812 }
813 if (!image->mt) {
814 assert(texImage->RowStride == postConvWidth);
815 }
816 }
817
818 /* Mesa core only clears texImage->Data but not image->mt */
819 radeonFreeTexImageData(ctx, texImage);
820
821 if (!t->bo) {
822 teximage_assign_miptree(rmesa, texObj, texImage, face, level);
823 if (!image->mt) {
824 int size = _mesa_format_image_size(texImage->TexFormat,
825 texImage->Width,
826 texImage->Height,
827 texImage->Depth);
828 texImage->Data = _mesa_alloc_texmemory(size);
829 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
830 "%s %dd: texObj %p, texImage %p, "
831 				"no miptree assigned, using local memory %p\n",
832 __func__, dims, texObj, texImage, texImage->Data);
833 }
834 }
835
836 /* Upload texture image; note that the spec allows pixels to be NULL */
837 if (compressed) {
838 pixels = _mesa_validate_pbo_compressed_teximage(
839 ctx, imageSize, pixels, packing, "glCompressedTexImage");
840 } else {
841 pixels = _mesa_validate_pbo_teximage(
842 ctx, dims, width, height, depth,
843 format, type, pixels, packing, "glTexImage");
844 }
845
846 if (pixels) {
847 radeon_store_teximage(ctx, dims,
848 0, 0, 0,
849 width, height, depth,
850 imageSize, format, type,
851 pixels, packing,
852 texObj, texImage,
853 compressed);
854 }
855
856 _mesa_unmap_teximage_pbo(ctx, packing);
857 }
858
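/* glTexImage* driver entry points: thin wrappers that fill in the missing
 * dimensions (and the compressed flag) and forward to radeon_teximage(). */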
859 void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
860 GLint internalFormat,
861 GLint width, GLint border,
862 GLenum format, GLenum type, const GLvoid * pixels,
863 const struct gl_pixelstore_attrib *packing,
864 struct gl_texture_object *texObj,
865 struct gl_texture_image *texImage)
866 {
867 radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
868 0, format, type, pixels, packing, texObj, texImage, 0);
869 }
870
871 void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
872 GLint internalFormat,
873 GLint width, GLint height, GLint border,
874 GLenum format, GLenum type, const GLvoid * pixels,
875 const struct gl_pixelstore_attrib *packing,
876 struct gl_texture_object *texObj,
877 struct gl_texture_image *texImage)
878
879 {
880 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
881 0, format, type, pixels, packing, texObj, texImage, 0);
882 }
883
884 void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
885 GLint level, GLint internalFormat,
886 GLint width, GLint height, GLint border,
887 GLsizei imageSize, const GLvoid * data,
888 struct gl_texture_object *texObj,
889 struct gl_texture_image *texImage)
890 {
891 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
892 imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
893 }
894
895 void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
896 GLint internalFormat,
897 GLint width, GLint height, GLint depth,
898 GLint border,
899 GLenum format, GLenum type, const GLvoid * pixels,
900 const struct gl_pixelstore_attrib *packing,
901 struct gl_texture_object *texObj,
902 struct gl_texture_image *texImage)
903 {
904 radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
905 0, format, type, pixels, packing, texObj, texImage, 0);
906 }
907
908 /**
909 * All glTexSubImage calls go through this function.
910 */
911 static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
912 GLint xoffset, GLint yoffset, GLint zoffset,
913 GLsizei width, GLsizei height, GLsizei depth,
914 GLsizei imageSize,
915 GLenum format, GLenum type,
916 const GLvoid * pixels,
917 const struct gl_pixelstore_attrib *packing,
918 struct gl_texture_object *texObj,
919 struct gl_texture_image *texImage,
920 int compressed)
921 {
922 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
923 radeonTexObj* t = radeon_tex_obj(texObj);
924 radeon_texture_image* image = get_radeon_texture_image(texImage);
925
926 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
927 "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
928 __func__, dims, texObj, texImage,
929 _mesa_tex_target_to_face(target), level);
930 {
931 struct radeon_bo *bo;
932 bo = !image->mt ? image->bo : image->mt->bo;
933 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
934 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
935 "%s Calling texsubimage for texture that is "
936 "queued for GPU processing.\n",
937 __func__);
938 radeon_firevertices(rmesa);
939 }
940 }
941
942
943 t->validated = GL_FALSE;
944 if (compressed) {
945 pixels = _mesa_validate_pbo_compressed_teximage(
946 ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
947 } else {
948 pixels = _mesa_validate_pbo_teximage(ctx, dims,
949 width, height, depth, format, type, pixels, packing, "glTexSubImage");
950 }
951
952 if (pixels) {
953 radeon_store_teximage(ctx, dims,
954 xoffset, yoffset, zoffset,
955 width, height, depth,
956 imageSize, format, type,
957 pixels, packing,
958 texObj, texImage,
959 compressed);
960 }
961
962 _mesa_unmap_teximage_pbo(ctx, packing);
963 }
964
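/* glTexSubImage* driver entry points, forwarding to radeon_texsubimage()
 * in the same way as the glTexImage* wrappers above. */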
965 void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
966 GLint xoffset,
967 GLsizei width,
968 GLenum format, GLenum type,
969 const GLvoid * pixels,
970 const struct gl_pixelstore_attrib *packing,
971 struct gl_texture_object *texObj,
972 struct gl_texture_image *texImage)
973 {
974 radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
975 format, type, pixels, packing, texObj, texImage, 0);
976 }
977
978 void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
979 GLint xoffset, GLint yoffset,
980 GLsizei width, GLsizei height,
981 GLenum format, GLenum type,
982 const GLvoid * pixels,
983 const struct gl_pixelstore_attrib *packing,
984 struct gl_texture_object *texObj,
985 struct gl_texture_image *texImage)
986 {
987 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
988 0, format, type, pixels, packing, texObj, texImage,
989 0);
990 }
991
992 void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
993 GLint level, GLint xoffset,
994 GLint yoffset, GLsizei width,
995 GLsizei height, GLenum format,
996 GLsizei imageSize, const GLvoid * data,
997 struct gl_texture_object *texObj,
998 struct gl_texture_image *texImage)
999 {
1000 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
1001 imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
1002 }
1003
1004
1005 void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
1006 GLint xoffset, GLint yoffset, GLint zoffset,
1007 GLsizei width, GLsizei height, GLsizei depth,
1008 GLenum format, GLenum type,
1009 const GLvoid * pixels,
1010 const struct gl_pixelstore_attrib *packing,
1011 struct gl_texture_object *texObj,
1012 struct gl_texture_image *texImage)
1013 {
1014 radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
1015 format, type, pixels, packing, texObj, texImage, 0);
1016 }
1017
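/**
 * Return 1 if the given texture format can also be used as a render target
 * (the four _dri_texformat color formats plus Z16 and S8_Z24), 0 otherwise.
 */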
1018 unsigned radeonIsFormatRenderable(gl_format mesa_format)
1019 {
1020 if (mesa_format == _dri_texformat_argb8888 || mesa_format == _dri_texformat_rgb565 ||
1021 mesa_format == _dri_texformat_argb1555 || mesa_format == _dri_texformat_argb4444)
1022 return 1;
1023
1024 switch (mesa_format)
1025 {
1026 case MESA_FORMAT_Z16:
1027 case MESA_FORMAT_S8_Z24:
1028 return 1;
1029 default:
1030 return 0;
1031 }
1032 }