Merge branch '7.8'
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_texture.c
1 /*
2 * Copyright (C) 2009 Maciej Cencora.
3 * Copyright (C) 2008 Nicolai Haehnle.
4 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 *
6 * The Weather Channel (TM) funded Tungsten Graphics to develop the
7 * initial release of the Radeon 8500 driver under the XFree86 license.
8 * This notice must be preserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining
11 * a copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sublicense, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial
20 * portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
26 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
27 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
28 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 */
31
32 #include "main/glheader.h"
33 #include "main/imports.h"
34 #include "main/context.h"
35 #include "main/convolve.h"
36 #include "main/enums.h"
37 #include "main/mipmap.h"
38 #include "main/texcompress.h"
39 #include "main/texstore.h"
40 #include "main/teximage.h"
41 #include "main/texobj.h"
42
43 #include "xmlpool.h" /* for symbolic values of enum-type options */
44
45 #include "radeon_common.h"
46
47 #include "radeon_mipmap_tree.h"
48
49
50 void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
51 GLuint numrows, GLuint rowsize)
52 {
53 assert(rowsize <= dststride);
54 assert(rowsize <= srcstride);
55
56 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
57 "%s dst %p, stride %u, src %p, stride %u, "
58 "numrows %u, rowsize %u.\n",
59 __func__, dst, dststride,
60 src, srcstride,
61 numrows, rowsize);
62
63 if (rowsize == srcstride && rowsize == dststride) {
64 memcpy(dst, src, numrows*rowsize);
65 } else {
66 GLuint i;
67 for(i = 0; i < numrows; ++i) {
68 memcpy(dst, src, rowsize);
69 dst += dststride;
70 src += srcstride;
71 }
72 }
73 }
74
75 /* textures */
76 /**
77 * Allocate an empty texture image object.
78 */
79 struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
80 {
81 return CALLOC(sizeof(radeon_texture_image));
82 }
83
/**
 * Free memory associated with this texture image.
 *
 * The image's pixel storage is either a miptree reference or
 * Mesa-core-allocated memory — release whichever applies, then drop
 * any override BO and any leftover texmemory block.
 */
void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		/* Data lives in the miptree; base.Data is only non-NULL
		 * while the miptree BO is mapped, so it must be clear here. */
		radeon_miptree_unreference(&image->mt);
		assert(!image->base.Data);
	} else {
		/* No miptree: let Mesa core free its own allocation. */
		_mesa_free_texture_image_data(ctx, timage);
	}
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	if (timage->Data) {
		_mesa_free_texmemory(timage->Data);
		timage->Data = NULL;
	}
}
106
/* Set Data pointer and additional data for mapped texture image.
 * Assumes the miptree's BO is currently mapped (bo->ptr valid). */
static void teximage_set_map_data(radeon_texture_image *image)
{
	radeon_mipmap_level *lvl;

	if (!image->mt) {
		/* Nothing to point into without a miptree. */
		radeon_warning("%s(%p) Trying to set map data without miptree.\n",
				__func__, image);

		return;
	}

	lvl = &image->mt->levels[image->mtlevel];

	/* Data points into the mapped BO at this face/level's offset;
	 * RowStride is in texels, hence the division by the texel size. */
	image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
	image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.TexFormat);
}
124
125
126 /**
127 * Map a single texture image for glTexImage and friends.
128 */
129 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
130 {
131 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
132 "%s(img %p), write_enable %s.\n",
133 __func__, image,
134 write_enable ? "true": "false");
135 if (image->mt) {
136 assert(!image->base.Data);
137
138 radeon_bo_map(image->mt->bo, write_enable);
139 teximage_set_map_data(image);
140 }
141 }
142
143
144 void radeon_teximage_unmap(radeon_texture_image *image)
145 {
146 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
147 "%s(img %p)\n",
148 __func__, image);
149 if (image->mt) {
150 assert(image->base.Data);
151
152 image->base.Data = 0;
153 radeon_bo_unmap(image->mt->bo);
154 }
155 }
156
157 static void map_override(GLcontext *ctx, radeonTexObj *t)
158 {
159 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
160
161 radeon_bo_map(t->bo, GL_FALSE);
162
163 img->base.Data = t->bo->ptr;
164 }
165
166 static void unmap_override(GLcontext *ctx, radeonTexObj *t)
167 {
168 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
169
170 radeon_bo_unmap(t->bo);
171
172 img->base.Data = NULL;
173 }
174
/**
 * Map a validated texture for reading during software rendering.
 *
 * Validates/updates the miptree, handles the r100 image-override case,
 * then maps the miptree BO once and points every image in the active
 * LOD range (minLod..maxLod) at the mapped memory.
 */
void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(%p, tex %p)\n",
		     __func__, ctx, texObj);

	if (!radeon_validate_texture_miptree(ctx, texObj)) {
		radeon_error("%s(%p, tex %p) Failed to validate miptree for "
			     "sw fallback.\n",
			     __func__, ctx, texObj);
		return;
	}

	if (t->image_override && t->bo) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			     "%s(%p, tex %p) Work around for missing miptree in r100.\n",
			     __func__, ctx, texObj);

		map_override(ctx, t);
	}

	/* for r100 3D sw fallbacks don't have mt */
	if (!t->mt) {
		radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
			       __func__, ctx, texObj);
		return;
	}

	/* One BO map covers all faces/levels; set per-image Data pointers. */
	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}
215
/**
 * Undo radeonMapTexture(): clear the Data pointers of every mapped
 * image in the active LOD range and unmap the miptree BO (and the
 * override BO, if one was mapped).
 */
void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(%p, tex %p)\n",
		     __func__, ctx, texObj);

	if (t->image_override && t->bo)
		unmap_override(ctx, t);
	/* for r100 3D sw fallbacks don't have mt */
	if (!t->mt)
		return;

	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			texObj->Image[face][level]->Data = 0;
	}
	radeon_bo_unmap(t->mt->bo);
}
237
/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
				   struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Target type %s.\n",
			__func__, ctx, texObj,
			_mesa_lookup_enum_by_nr(target));

	_mesa_generate_mipmap(ctx, target, texObj);

	/* Mesa recreated the smaller-level images in plain memory: record
	 * the face/level each image will occupy and drop the now-stale
	 * miptree references so re-validation can reattach them. */
	for (face = 0; face < nr_faces; face++) {
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(&image->mt);
		}
	}

}
275
/**
 * GenerateMipmap driver hook.
 *
 * Flushes any pending rendering that still references the base image's
 * buffer, then maps the base level read-only around the software
 * mipmap generation done by radeon_generate_mipmap().
 */
void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct radeon_bo *bo;
	GLuint face = _mesa_tex_target_to_face(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
	/* Base image data lives either in its own BO or in the miptree BO. */
	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, target %s, tex %p)\n",
		__func__, ctx, _mesa_lookup_enum_by_nr(target),
		texObj);

	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s(%p, tex %p) Trying to generate mipmap for texture "
			"in processing by GPU.\n",
			__func__, ctx, texObj);
		radeon_firevertices(rmesa);
	}

	radeon_teximage_map(baseimage, GL_FALSE);
	radeon_generate_mipmap(ctx, target, texObj);
	radeon_teximage_unmap(baseimage);
}
301
302
/* try to find a format which will only need a memcopy */
static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
					   GLenum srcFormat,
					   GLenum srcType, GLboolean fbo)
{
	/* Runtime endianness probe: first byte of a GLuint 1 is nonzero
	 * only on little-endian hosts. */
	const GLuint ui = 1;
	const GLubyte littleEndian = *((const GLubyte *)&ui);

	/* r100 can only do this */
	if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
		return _dri_texformat_argb8888;

	/* Each arm matches source format/type/endianness combinations
	 * whose in-memory byte order already equals the listed MESA
	 * format, so the upload degenerates to a memcpy. */
	if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
	    (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
		return MESA_FORMAT_RGBA8888;
	} else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
		   (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
		return MESA_FORMAT_RGBA8888_REV;
	} else if (IS_R200_CLASS(rmesa->radeonScreen)) {
		/* r200 falls back to argb8888 for anything else. */
		return _dri_texformat_argb8888;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8)) {
		return MESA_FORMAT_ARGB8888_REV;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
		return MESA_FORMAT_ARGB8888;
	} else
		return _dri_texformat_argb8888;
}
336
337 gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
338 GLint internalFormat,
339 GLenum format,
340 GLenum type)
341 {
342 return radeonChooseTextureFormat(ctx, internalFormat, format,
343 type, 0);
344 }
345
/**
 * Map a GL internalFormat (+ optional format/type hints) to the
 * hardware texture format the driver will actually store.
 *
 * Selection honours the user-configurable texture depth: do32bpt
 * prefers 32bpp formats, force16bpt forces 16bpp ones. The fbo flag
 * is forwarded to radeonChoose8888TexFormat() for the 8888 cases.
 */
gl_format radeonChooseTextureFormat(GLcontext * ctx,
				    GLint internalFormat,
				    GLenum format,
				    GLenum type, GLboolean fbo)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	const GLboolean do32bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s InternalFormat=%s(%d) type=%s format=%s\n",
		__func__,
		_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s do32bpt=%d force16bpt=%d\n",
		__func__, do32bpt, force16bpt);

	switch (internalFormat) {
	/* Generic RGBA: pick the cheapest format that preserves the
	 * source's component layout where possible. */
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
				_dri_texformat_argb4444;
		}

	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_rgb565;
		}

	/* Sized RGBA: 8+ bit components honour force16bpt. */
	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		return !force16bpt ?
			radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			_dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_argb8888 :
			_dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
	/* r200: can't use a8 format since interpreting hw I8 as a8 would result
	   in wrong rgb values (same as alpha value instead of 0). */
		if (IS_R200_CLASS(rmesa->radeonScreen))
			return _dri_texformat_al88;
		else
			return _dri_texformat_a8;
	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return MESA_FORMAT_YCBCR;
		else
			return MESA_FORMAT_YCBCR_REV;

	/* S3TC compressed formats. */
	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGB_DXT1;

	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGBA_DXT1;

	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return MESA_FORMAT_RGBA_DXT3;

	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return MESA_FORMAT_RGBA_DXT5;

	/* ARB_texture_float. Note RGB float maps to the RGBA float
	 * formats (no RGB-only float format available). */
	case GL_ALPHA16F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT16;
	case GL_ALPHA32F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT32;
	case GL_LUMINANCE16F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT16;
	case GL_LUMINANCE32F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT32;
	case GL_LUMINANCE_ALPHA16F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
	case GL_LUMINANCE_ALPHA32F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
	case GL_INTENSITY16F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT16;
	case GL_INTENSITY32F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT32;
	case GL_RGB16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGB32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;
	case GL_RGBA16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGBA32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;

#ifdef RADEON_R300
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
		return MESA_FORMAT_Z16;
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
			return MESA_FORMAT_S8_Z24;
		else
			return MESA_FORMAT_Z16;
#else
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		return MESA_FORMAT_S8_Z24;
#endif

	/* EXT_texture_sRGB */
	case GL_SRGB:
	case GL_SRGB8:
	case GL_SRGB_ALPHA:
	case GL_SRGB8_ALPHA8:
	case GL_COMPRESSED_SRGB:
	case GL_COMPRESSED_SRGB_ALPHA:
		return MESA_FORMAT_SRGBA8;

	case GL_SLUMINANCE:
	case GL_SLUMINANCE8:
	case GL_COMPRESSED_SLUMINANCE:
		return MESA_FORMAT_SL8;

	case GL_SLUMINANCE_ALPHA:
	case GL_SLUMINANCE8_ALPHA8:
	case GL_COMPRESSED_SLUMINANCE_ALPHA:
		return MESA_FORMAT_SLA8;

	case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGB_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGBA_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
		return MESA_FORMAT_SRGBA_DXT3;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
		return MESA_FORMAT_SRGBA_DXT5;

	default:
		_mesa_problem(ctx,
			      "unexpected internalFormat 0x%x in %s",
			      (int)internalFormat, __func__);
		return MESA_FORMAT_NONE;
	}

	return MESA_FORMAT_NONE;	/* never get here */
}
579
580 /** Check if given image is valid within current texture object.
581 */
582 static int image_matches_texture_obj(struct gl_texture_object *texObj,
583 struct gl_texture_image *texImage,
584 unsigned level)
585 {
586 const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];
587
588 if (!baseImage)
589 return 0;
590
591 if (level < texObj->BaseLevel || level > texObj->MaxLevel)
592 return 0;
593
594 const unsigned levelDiff = level - texObj->BaseLevel;
595 const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
596 const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
597 const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);
598
599 return (texImage->Width == refWidth &&
600 texImage->Height == refHeight &&
601 texImage->Depth == refDepth);
602 }
603
604 static void teximage_assign_miptree(radeonContextPtr rmesa,
605 struct gl_texture_object *texObj,
606 struct gl_texture_image *texImage,
607 unsigned face,
608 unsigned level)
609 {
610 radeonTexObj *t = radeon_tex_obj(texObj);
611 radeon_texture_image* image = get_radeon_texture_image(texImage);
612
613 /* Since miptree holds only images for levels <BaseLevel..MaxLevel>
614 * don't allocate the miptree if the teximage won't fit.
615 */
616 if (!image_matches_texture_obj(texObj, texImage, level))
617 return;
618
619 /* Try using current miptree, or create new if there isn't any */
620 if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
621 radeon_miptree_unreference(&t->mt);
622 radeon_try_alloc_miptree(rmesa, t);
623 radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
624 "%s: texObj %p, texImage %p, face %d, level %d, "
625 "texObj miptree doesn't match, allocated new miptree %p\n",
626 __FUNCTION__, texObj, texImage, face, level, t->mt);
627 }
628
629 /* Miptree alocation may have failed,
630 * when there was no image for baselevel specified */
631 if (t->mt) {
632 image->mtface = face;
633 image->mtlevel = level;
634 radeon_miptree_reference(t->mt, &image->mt);
635 } else
636 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
637 "%s Failed to allocate miptree.\n", __func__);
638 }
639
640 static GLuint * allocate_image_offsets(GLcontext *ctx,
641 unsigned alignedWidth,
642 unsigned height,
643 unsigned depth)
644 {
645 int i;
646 GLuint *offsets;
647
648 offsets = malloc(depth * sizeof(GLuint)) ;
649 if (!offsets) {
650 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
651 return NULL;
652 }
653
654 for (i = 0; i < depth; ++i) {
655 offsets[i] = alignedWidth * height * i;
656 }
657
658 return offsets;
659 }
660
661 /**
662 * Update a subregion of the given texture image.
663 */
664 static void radeon_store_teximage(GLcontext* ctx, int dims,
665 GLint xoffset, GLint yoffset, GLint zoffset,
666 GLsizei width, GLsizei height, GLsizei depth,
667 GLsizei imageSize,
668 GLenum format, GLenum type,
669 const GLvoid * pixels,
670 const struct gl_pixelstore_attrib *packing,
671 struct gl_texture_object *texObj,
672 struct gl_texture_image *texImage,
673 int compressed)
674 {
675 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
676 radeonTexObj *t = radeon_tex_obj(texObj);
677 radeon_texture_image* image = get_radeon_texture_image(texImage);
678
679 GLuint dstRowStride;
680 GLuint *dstImageOffsets;
681
682 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
683 "%s(%p, tex %p, image %p) compressed %d\n",
684 __func__, ctx, texObj, texImage, compressed);
685
686 if (image->mt) {
687 dstRowStride = image->mt->levels[image->mtlevel].rowstride;
688 } else if (t->bo) {
689 /* TFP case */
690 dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
691 } else {
692 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
693 }
694
695 assert(dstRowStride);
696
697 if (dims == 3) {
698 unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
699 dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
700 if (!dstImageOffsets) {
701 radeon_warning("%s Failed to allocate dstImaeOffset.\n", __func__);
702 return;
703 }
704 } else {
705 dstImageOffsets = texImage->ImageOffsets;
706 }
707
708 radeon_teximage_map(image, GL_TRUE);
709
710 if (compressed) {
711 uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
712 GLubyte *img_start;
713
714 _mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);
715
716 if (!image->mt) {
717 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
718 img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
719 texImage->TexFormat,
720 texImage->Width, texImage->Data);
721 }
722 else {
723 uint32_t offset;
724 offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
725 offset *= _mesa_get_format_bytes(texImage->TexFormat);
726 img_start = texImage->Data + offset;
727 }
728 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
729 bytesPerRow = srcRowStride;
730 rows = (height + block_height - 1) / block_height;
731
732 copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
733 }
734 else {
735 if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
736 texImage->TexFormat, texImage->Data,
737 xoffset, yoffset, zoffset,
738 dstRowStride,
739 dstImageOffsets,
740 width, height, depth,
741 format, type, pixels, packing)) {
742 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
743 }
744 }
745
746 if (dims == 3) {
747 free(dstImageOffsets);
748 }
749
750 radeon_teximage_unmap(image);
751 }
752
/**
 * All glTexImage calls go through this function.
 *
 * Sequence: flush any GPU work still referencing the image's buffer,
 * invalidate the texobj, free old image storage, assign miptree (or
 * local memory), validate the PBO source, and finally store the pixels
 * via radeon_store_teximage().
 */
static void radeon_teximage(
	GLcontext *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLint postConvWidth = width;
	GLint postConvHeight = height;
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage, face, level);
	/* Flush if the GPU still references the image's buffer. */
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling teximage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;

	if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
		_mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
						   &postConvHeight);
	}

	if (!_mesa_is_format_compressed(texImage->TexFormat)) {
		GLuint texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
		/* Minimum pitch of 32 bytes */
		if (postConvWidth * texelBytes < 32) {
			postConvWidth = 32 / texelBytes;
			texImage->RowStride = postConvWidth;
		}
		if (!image->mt) {
			assert(texImage->RowStride == postConvWidth);
		}
	}

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			/* No miptree could be assigned (e.g. the image does
			 * not fit the tree): fall back to local memory. */
			int size = _mesa_format_image_size(texImage->TexFormat,
							   texImage->Width,
							   texImage->Height,
							   texImage->Depth);
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s %dd: texObj %p, texImage %p, "
				" no miptree assigned, using local memory %p\n",
				__func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			0, 0, 0,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
850
851 void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
852 GLint internalFormat,
853 GLint width, GLint border,
854 GLenum format, GLenum type, const GLvoid * pixels,
855 const struct gl_pixelstore_attrib *packing,
856 struct gl_texture_object *texObj,
857 struct gl_texture_image *texImage)
858 {
859 radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
860 0, format, type, pixels, packing, texObj, texImage, 0);
861 }
862
863 void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
864 GLint internalFormat,
865 GLint width, GLint height, GLint border,
866 GLenum format, GLenum type, const GLvoid * pixels,
867 const struct gl_pixelstore_attrib *packing,
868 struct gl_texture_object *texObj,
869 struct gl_texture_image *texImage)
870
871 {
872 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
873 0, format, type, pixels, packing, texObj, texImage, 0);
874 }
875
876 void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
877 GLint level, GLint internalFormat,
878 GLint width, GLint height, GLint border,
879 GLsizei imageSize, const GLvoid * data,
880 struct gl_texture_object *texObj,
881 struct gl_texture_image *texImage)
882 {
883 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
884 imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
885 }
886
887 void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
888 GLint internalFormat,
889 GLint width, GLint height, GLint depth,
890 GLint border,
891 GLenum format, GLenum type, const GLvoid * pixels,
892 const struct gl_pixelstore_attrib *packing,
893 struct gl_texture_object *texObj,
894 struct gl_texture_image *texImage)
895 {
896 radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
897 0, format, type, pixels, packing, texObj, texImage, 0);
898 }
899
/**
 * All glTexSubImage calls go through this function.
 *
 * Like radeon_teximage() but for partial updates: the existing storage
 * is kept — only the GPU flush, PBO validation and pixel store happen.
 */
static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
			       GLint xoffset, GLint yoffset, GLint zoffset,
			       GLsizei width, GLsizei height, GLsizei depth,
			       GLsizei imageSize,
			       GLenum format, GLenum type,
			       const GLvoid * pixels,
			       const struct gl_pixelstore_attrib *packing,
			       struct gl_texture_object *texObj,
			       struct gl_texture_image *texImage,
			       int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage,
			_mesa_tex_target_to_face(target), level);
	/* Flush if the GPU still references the image's buffer. */
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling texsubimage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			xoffset, yoffset, zoffset,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
956
957 void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
958 GLint xoffset,
959 GLsizei width,
960 GLenum format, GLenum type,
961 const GLvoid * pixels,
962 const struct gl_pixelstore_attrib *packing,
963 struct gl_texture_object *texObj,
964 struct gl_texture_image *texImage)
965 {
966 radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
967 format, type, pixels, packing, texObj, texImage, 0);
968 }
969
970 void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
971 GLint xoffset, GLint yoffset,
972 GLsizei width, GLsizei height,
973 GLenum format, GLenum type,
974 const GLvoid * pixels,
975 const struct gl_pixelstore_attrib *packing,
976 struct gl_texture_object *texObj,
977 struct gl_texture_image *texImage)
978 {
979 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
980 0, format, type, pixels, packing, texObj, texImage,
981 0);
982 }
983
984 void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
985 GLint level, GLint xoffset,
986 GLint yoffset, GLsizei width,
987 GLsizei height, GLenum format,
988 GLsizei imageSize, const GLvoid * data,
989 struct gl_texture_object *texObj,
990 struct gl_texture_image *texImage)
991 {
992 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
993 imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
994 }
995
996
997 void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
998 GLint xoffset, GLint yoffset, GLint zoffset,
999 GLsizei width, GLsizei height, GLsizei depth,
1000 GLenum format, GLenum type,
1001 const GLvoid * pixels,
1002 const struct gl_pixelstore_attrib *packing,
1003 struct gl_texture_object *texObj,
1004 struct gl_texture_image *texImage)
1005 {
1006 radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
1007 format, type, pixels, packing, texObj, texImage, 0);
1008 }
1009
1010 unsigned radeonIsFormatRenderable(gl_format mesa_format)
1011 {
1012 if (mesa_format == _dri_texformat_argb8888 || mesa_format == _dri_texformat_rgb565 ||
1013 mesa_format == _dri_texformat_argb1555 || mesa_format == _dri_texformat_argb4444)
1014 return 1;
1015
1016 switch (mesa_format)
1017 {
1018 case MESA_FORMAT_Z16:
1019 case MESA_FORMAT_S8_Z24:
1020 return 1;
1021 default:
1022 return 0;
1023 }
1024 }