Merge branch 'glsl2-head' into glsl2
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_texture.c
1 /*
2 * Copyright (C) 2009 Maciej Cencora.
3 * Copyright (C) 2008 Nicolai Haehnle.
4 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 *
6 * The Weather Channel (TM) funded Tungsten Graphics to develop the
7 * initial release of the Radeon 8500 driver under the XFree86 license.
8 * This notice must be preserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining
11 * a copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sublicense, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial
20 * portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
26 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
27 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
28 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 */
31
32 #include "main/glheader.h"
33 #include "main/imports.h"
34 #include "main/context.h"
35 #include "main/convolve.h"
36 #include "main/enums.h"
37 #include "main/mipmap.h"
38 #include "main/texcompress.h"
39 #include "main/texstore.h"
40 #include "main/teximage.h"
41 #include "main/texobj.h"
42 #include "drivers/common/meta.h"
43
44 #include "xmlpool.h" /* for symbolic values of enum-type options */
45
46 #include "radeon_common.h"
47
48 #include "radeon_mipmap_tree.h"
49
50
51 void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
52 GLuint numrows, GLuint rowsize)
53 {
54 assert(rowsize <= dststride);
55 assert(rowsize <= srcstride);
56
57 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
58 "%s dst %p, stride %u, src %p, stride %u, "
59 "numrows %u, rowsize %u.\n",
60 __func__, dst, dststride,
61 src, srcstride,
62 numrows, rowsize);
63
64 if (rowsize == srcstride && rowsize == dststride) {
65 memcpy(dst, src, numrows*rowsize);
66 } else {
67 GLuint i;
68 for(i = 0; i < numrows; ++i) {
69 memcpy(dst, src, rowsize);
70 dst += dststride;
71 src += srcstride;
72 }
73 }
74 }
75
76 /* textures */
77 /**
78 * Allocate an empty texture image object.
79 */
struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
{
	/* Allocate the driver-private image struct, zero-initialized.  Mesa
	 * core only sees the embedded gl_texture_image base portion. */
	return CALLOC(sizeof(radeon_texture_image));
}
84
85 /**
86 * Free memory associated with this texture image.
87 */
/**
 * Free memory associated with this texture image: the miptree reference
 * (or core-Mesa data), the standalone buffer object, and any system-memory
 * texel copy.  Safe to call on an image in any of those states.
 */
void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		/* Image lives in a miptree: dropping our reference is all
		 * that's needed; base.Data must not own malloc'ed memory. */
		radeon_miptree_unreference(&image->mt);
		assert(!image->base.Data);
	} else {
		_mesa_free_texture_image_data(ctx, timage);
	}
	/* TFP/override path: release the standalone buffer object, if any. */
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	/* Free any remaining system-memory copy of the texels. */
	if (timage->Data) {
		_mesa_free_texmemory(timage->Data);
		timage->Data = NULL;
	}
}
107
108 /* Set Data pointer and additional data for mapped texture image */
109 static void teximage_set_map_data(radeon_texture_image *image)
110 {
111 radeon_mipmap_level *lvl;
112
113 if (!image->mt) {
114 radeon_warning("%s(%p) Trying to set map data without miptree.\n",
115 __func__, image);
116
117 return;
118 }
119
120 lvl = &image->mt->levels[image->mtlevel];
121
122 image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
123 image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.TexFormat);
124 }
125
126
127 /**
128 * Map a single texture image for glTexImage and friends.
129 */
130 void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
131 {
132 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
133 "%s(img %p), write_enable %s.\n",
134 __func__, image,
135 write_enable ? "true": "false");
136 if (image->mt) {
137 assert(!image->base.Data);
138
139 radeon_bo_map(image->mt->bo, write_enable);
140 teximage_set_map_data(image);
141 }
142 }
143
144
145 void radeon_teximage_unmap(radeon_texture_image *image)
146 {
147 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
148 "%s(img %p)\n",
149 __func__, image);
150 if (image->mt) {
151 assert(image->base.Data);
152
153 image->base.Data = 0;
154 radeon_bo_unmap(image->mt->bo);
155 }
156 }
157
158 static void map_override(GLcontext *ctx, radeonTexObj *t)
159 {
160 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
161
162 radeon_bo_map(t->bo, GL_FALSE);
163
164 img->base.Data = t->bo->ptr;
165 }
166
167 static void unmap_override(GLcontext *ctx, radeonTexObj *t)
168 {
169 radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);
170
171 radeon_bo_unmap(t->bo);
172
173 img->base.Data = NULL;
174 }
175
176 /**
177 * Map a validated texture for reading during software rendering.
178 */
/**
 * Map a validated texture for reading during software rendering.
 *
 * Validates the miptree, handles the r100 image-override workaround,
 * then maps the miptree bo and sets map data for every face/level in
 * the texture's active LOD range.
 */
void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p)\n",
			__func__, ctx, texObj);

	if (!radeon_validate_texture_miptree(ctx, texObj)) {
		radeon_error("%s(%p, tex %p) Failed to validate miptree for "
			"sw fallback.\n",
			__func__, ctx, texObj);
		return;
	}

	if (t->image_override && t->bo) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Work around for missing miptree in r100.\n",
			__func__, ctx, texObj);

		map_override(ctx, t);
	}

	/* for r100 3D sw fallbacks don't have mt */
	if (!t->mt) {
		radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
			__func__, ctx, texObj);
		return;
	}

	/* Map once, then fix up Data/RowStride of every image in range. */
	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}
216
217 void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
218 {
219 radeonTexObj* t = radeon_tex_obj(texObj);
220 int face, level;
221
222 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
223 "%s(%p, tex %p)\n",
224 __func__, ctx, texObj);
225
226 if (t->image_override && t->bo)
227 unmap_override(ctx, t);
228 /* for r100 3D sw fallbacks don't have mt */
229 if (!t->mt)
230 return;
231
232 for(face = 0; face < t->mt->faces; ++face) {
233 for(level = t->minLod; level <= t->maxLod; ++level)
234 texObj->Image[face][level]->Data = 0;
235 }
236 radeon_bo_unmap(t->mt->bo);
237 }
238
239 /**
240 * Wraps Mesa's implementation to ensure that the base level image is mapped.
241 *
242 * This relies on internal details of _mesa_generate_mipmap, in particular
243 * the fact that the memory for recreated texture images is always freed.
244 */
/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
		struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Target type %s.\n",
			__func__, ctx, texObj,
			_mesa_lookup_enum_by_nr(target));

	_mesa_generate_mipmap(ctx, target, texObj);

	/* _mesa_generate_mipmap stored the regenerated levels in system
	 * memory (texImage->Data); drop the stale miptree references so the
	 * images are re-uploaded on next validation. */
	for (face = 0; face < nr_faces; face++) {
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			/* No image at this level: nothing further to fix up. */
			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(&image->mt);
		}
	}

}
276
/**
 * Driver GenerateMipmap hook: flush pending GPU work that references the
 * base image's buffer, then either run the software fallback (with the
 * base image mapped) or the hardware meta path.
 */
void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct radeon_bo *bo;
	GLuint face = _mesa_tex_target_to_face(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
	/* Base image storage: standalone bo (TFP) or the miptree's bo. */
	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, target %s, tex %p)\n",
		__func__, ctx, _mesa_lookup_enum_by_nr(target),
		texObj);

	/* CPU access below would race with queued rendering; flush first. */
	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s(%p, tex %p) Trying to generate mipmap for texture "
			"in processing by GPU.\n",
			__func__, ctx, texObj);
		radeon_firevertices(rmesa);
	}

	if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
		radeon_teximage_map(baseimage, GL_FALSE);
		radeon_generate_mipmap(ctx, target, texObj);
		radeon_teximage_unmap(baseimage);
	} else {
		_mesa_meta_GenerateMipmap(ctx, target, texObj);
	}
}
306
307
308 /* try to find a format which will only need a memcopy */
309 static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
310 GLenum srcFormat,
311 GLenum srcType, GLboolean fbo)
312 {
313 const GLuint ui = 1;
314 const GLubyte littleEndian = *((const GLubyte *)&ui);
315
316 /* r100 can only do this */
317 if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
318 return _dri_texformat_argb8888;
319
320 if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
321 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
322 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
323 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
324 return MESA_FORMAT_RGBA8888;
325 } else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
326 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
327 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
328 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
329 return MESA_FORMAT_RGBA8888_REV;
330 } else if (IS_R200_CLASS(rmesa->radeonScreen)) {
331 return _dri_texformat_argb8888;
332 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
333 srcType == GL_UNSIGNED_INT_8_8_8_8)) {
334 return MESA_FORMAT_ARGB8888_REV;
335 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
336 srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
337 return MESA_FORMAT_ARGB8888;
338 } else
339 return _dri_texformat_argb8888;
340 }
341
342 gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
343 GLint internalFormat,
344 GLenum format,
345 GLenum type)
346 {
347 return radeonChooseTextureFormat(ctx, internalFormat, format,
348 type, 0);
349 }
350
/**
 * Map a GL internalFormat (plus client format/type and the driconf
 * texture-depth preference) to the hardware gl_format to use.
 *
 * do32bpt prefers 32-bit formats where the internalFormat is ambiguous;
 * force16bpt demotes even explicit 32-bit requests to 16-bit formats.
 * fbo selects the stricter renderbuffer path in radeonChoose8888TexFormat.
 */
gl_format radeonChooseTextureFormat(GLcontext * ctx,
				    GLint internalFormat,
				    GLenum format,
				    GLenum type, GLboolean fbo)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	const GLboolean do32bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s InternalFormat=%s(%d) type=%s format=%s\n",
		__func__,
		_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s do32bpt=%d force16bpt=%d\n",
			__func__, do32bpt, force16bpt);

	switch (internalFormat) {
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		/* Generic RGBA: pick component sizes from the client type. */
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
				_dri_texformat_argb4444;
		}

	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_rgb565;
		}

	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		/* Explicit >=8-bit RGBA: honour it unless 16bpp is forced. */
		return !force16bpt ?
			radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			_dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_argb8888 :
			_dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
		/* r200: can't use a8 format since interpreting hw I8 as a8 would result
		   in wrong rgb values (same as alpha value instead of 0). */
		if (IS_R200_CLASS(rmesa->radeonScreen))
			return _dri_texformat_al88;
		else
			return _dri_texformat_a8;
	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return MESA_FORMAT_YCBCR;
		else
			return MESA_FORMAT_YCBCR_REV;

	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGB_DXT1;

	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGBA_DXT1;

	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return MESA_FORMAT_RGBA_DXT3;

	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return MESA_FORMAT_RGBA_DXT5;

	case GL_ALPHA16F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT16;
	case GL_ALPHA32F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT32;
	case GL_LUMINANCE16F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT16;
	case GL_LUMINANCE32F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT32;
	case GL_LUMINANCE_ALPHA16F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
	case GL_LUMINANCE_ALPHA32F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
	case GL_INTENSITY16F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT16;
	case GL_INTENSITY32F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT32;
	/* NOTE(review): RGB float maps to the RGBA float formats — looks
	 * intentional (no 3-component float format in hw); confirm. */
	case GL_RGB16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGB32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;
	case GL_RGBA16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGBA32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;

#ifdef RADEON_R300
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
		return MESA_FORMAT_Z16;
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		/* Only RV515 and newer sample 24-bit depth textures. */
		if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
			return MESA_FORMAT_S8_Z24;
		else
			return MESA_FORMAT_Z16;
#else
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		return MESA_FORMAT_S8_Z24;
#endif

	/* EXT_texture_sRGB */
	case GL_SRGB:
	case GL_SRGB8:
	case GL_SRGB_ALPHA:
	case GL_SRGB8_ALPHA8:
	case GL_COMPRESSED_SRGB:
	case GL_COMPRESSED_SRGB_ALPHA:
		return MESA_FORMAT_SRGBA8;

	case GL_SLUMINANCE:
	case GL_SLUMINANCE8:
	case GL_COMPRESSED_SLUMINANCE:
		return MESA_FORMAT_SL8;

	case GL_SLUMINANCE_ALPHA:
	case GL_SLUMINANCE8_ALPHA8:
	case GL_COMPRESSED_SLUMINANCE_ALPHA:
		return MESA_FORMAT_SLA8;

	case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGB_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGBA_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
		return MESA_FORMAT_SRGBA_DXT3;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
		return MESA_FORMAT_SRGBA_DXT5;

	default:
		_mesa_problem(ctx,
			      "unexpected internalFormat 0x%x in %s",
			      (int)internalFormat, __func__);
		return MESA_FORMAT_NONE;
	}

	return MESA_FORMAT_NONE;	/* never get here */
}
584
585 /** Check if given image is valid within current texture object.
586 */
587 static int image_matches_texture_obj(struct gl_texture_object *texObj,
588 struct gl_texture_image *texImage,
589 unsigned level)
590 {
591 const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];
592
593 if (!baseImage)
594 return 0;
595
596 if (level < texObj->BaseLevel || level > texObj->MaxLevel)
597 return 0;
598
599 const unsigned levelDiff = level - texObj->BaseLevel;
600 const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
601 const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
602 const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);
603
604 return (texImage->Width == refWidth &&
605 texImage->Height == refHeight &&
606 texImage->Depth == refDepth);
607 }
608
/* Attach the given texture image to the texture object's miptree,
 * (re)allocating the miptree if the current one cannot hold the image.
 * On allocation failure the image is left without a miptree and the
 * caller falls back to system-memory storage.
 */
static void teximage_assign_miptree(radeonContextPtr rmesa,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		unsigned face,
		unsigned level)
{
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	/* Since miptree holds only images for levels <BaseLevel..MaxLevel>
	 * don't allocate the miptree if the teximage won't fit.
	 */
	if (!image_matches_texture_obj(texObj, texImage, level))
		return;

	/* Try using current miptree, or create new if there isn't any */
	if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		radeon_miptree_unreference(&t->mt);
		radeon_try_alloc_miptree(rmesa, t);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
				"%s: texObj %p, texImage %p, face %d, level %d, "
				"texObj miptree doesn't match, allocated new miptree %p\n",
				__FUNCTION__, texObj, texImage, face, level, t->mt);
	}

	/* Miptree alocation may have failed,
	 * when there was no image for baselevel specified */
	if (t->mt) {
		image->mtface = face;
		image->mtlevel = level;
		radeon_miptree_reference(t->mt, &image->mt);
	} else
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Failed to allocate miptree.\n", __func__);
}
644
645 static GLuint * allocate_image_offsets(GLcontext *ctx,
646 unsigned alignedWidth,
647 unsigned height,
648 unsigned depth)
649 {
650 int i;
651 GLuint *offsets;
652
653 offsets = malloc(depth * sizeof(GLuint)) ;
654 if (!offsets) {
655 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
656 return NULL;
657 }
658
659 for (i = 0; i < depth; ++i) {
660 offsets[i] = alignedWidth * height * i;
661 }
662
663 return offsets;
664 }
665
666 /**
667 * Update a subregion of the given texture image.
668 */
669 static void radeon_store_teximage(GLcontext* ctx, int dims,
670 GLint xoffset, GLint yoffset, GLint zoffset,
671 GLsizei width, GLsizei height, GLsizei depth,
672 GLsizei imageSize,
673 GLenum format, GLenum type,
674 const GLvoid * pixels,
675 const struct gl_pixelstore_attrib *packing,
676 struct gl_texture_object *texObj,
677 struct gl_texture_image *texImage,
678 int compressed)
679 {
680 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
681 radeonTexObj *t = radeon_tex_obj(texObj);
682 radeon_texture_image* image = get_radeon_texture_image(texImage);
683
684 GLuint dstRowStride;
685 GLuint *dstImageOffsets;
686
687 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
688 "%s(%p, tex %p, image %p) compressed %d\n",
689 __func__, ctx, texObj, texImage, compressed);
690
691 if (image->mt) {
692 dstRowStride = image->mt->levels[image->mtlevel].rowstride;
693 } else if (t->bo) {
694 /* TFP case */
695 dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
696 } else {
697 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
698 }
699
700 assert(dstRowStride);
701
702 if (dims == 3) {
703 unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
704 dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
705 if (!dstImageOffsets) {
706 radeon_warning("%s Failed to allocate dstImaeOffset.\n", __func__);
707 return;
708 }
709 } else {
710 dstImageOffsets = texImage->ImageOffsets;
711 }
712
713 radeon_teximage_map(image, GL_TRUE);
714
715 if (compressed) {
716 uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
717 GLubyte *img_start;
718
719 _mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);
720
721 if (!image->mt) {
722 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
723 img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
724 texImage->TexFormat,
725 texImage->Width, texImage->Data);
726 }
727 else {
728 uint32_t offset;
729 offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
730 offset *= _mesa_get_format_bytes(texImage->TexFormat);
731 img_start = texImage->Data + offset;
732 }
733 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
734 bytesPerRow = srcRowStride;
735 rows = (height + block_height - 1) / block_height;
736
737 copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
738 }
739 else {
740 if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
741 texImage->TexFormat, texImage->Data,
742 xoffset, yoffset, zoffset,
743 dstRowStride,
744 dstImageOffsets,
745 width, height, depth,
746 format, type, pixels, packing)) {
747 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
748 }
749 }
750
751 if (dims == 3) {
752 free(dstImageOffsets);
753 }
754
755 radeon_teximage_unmap(image);
756 }
757
758 /**
759 * All glTexImage calls go through this function.
760 */
/**
 * All glTexImage calls go through this function.
 *
 * Flushes pending GPU work referencing the image's buffer, frees any old
 * storage, assigns (or allocates) miptree storage — falling back to
 * malloc'ed system memory — then uploads the pixels, honouring PBO
 * unpack state.
 */
static void radeon_teximage(
	GLcontext *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLint postConvWidth = width;
	GLint postConvHeight = height;
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage, face, level);
	{
		/* CPU upload below would race with queued GPU work on this
		 * buffer; flush the command stream first. */
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling teximage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;

	if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
		_mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
						   &postConvHeight);
	}

	if (!_mesa_is_format_compressed(texImage->TexFormat)) {
		GLuint texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
		/* Minimum pitch of 32 bytes */
		if (postConvWidth * texelBytes < 32) {
			postConvWidth = 32 / texelBytes;
			texImage->RowStride = postConvWidth;
		}
		if (!image->mt) {
			assert(texImage->RowStride == postConvWidth);
		}
	}

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			/* Miptree allocation failed or image doesn't fit:
			 * fall back to system-memory storage. */
			int size = _mesa_format_image_size(texImage->TexFormat,
								texImage->Width,
								texImage->Height,
								texImage->Depth);
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
					"%s %dd: texObj %p, texImage %p, "
					" no miptree assigned, using local memory %p\n",
					__func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			0, 0, 0,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
855
856 void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
857 GLint internalFormat,
858 GLint width, GLint border,
859 GLenum format, GLenum type, const GLvoid * pixels,
860 const struct gl_pixelstore_attrib *packing,
861 struct gl_texture_object *texObj,
862 struct gl_texture_image *texImage)
863 {
864 radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
865 0, format, type, pixels, packing, texObj, texImage, 0);
866 }
867
868 void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
869 GLint internalFormat,
870 GLint width, GLint height, GLint border,
871 GLenum format, GLenum type, const GLvoid * pixels,
872 const struct gl_pixelstore_attrib *packing,
873 struct gl_texture_object *texObj,
874 struct gl_texture_image *texImage)
875
876 {
877 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
878 0, format, type, pixels, packing, texObj, texImage, 0);
879 }
880
881 void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
882 GLint level, GLint internalFormat,
883 GLint width, GLint height, GLint border,
884 GLsizei imageSize, const GLvoid * data,
885 struct gl_texture_object *texObj,
886 struct gl_texture_image *texImage)
887 {
888 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
889 imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
890 }
891
892 void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
893 GLint internalFormat,
894 GLint width, GLint height, GLint depth,
895 GLint border,
896 GLenum format, GLenum type, const GLvoid * pixels,
897 const struct gl_pixelstore_attrib *packing,
898 struct gl_texture_object *texObj,
899 struct gl_texture_image *texImage)
900 {
901 radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
902 0, format, type, pixels, packing, texObj, texImage, 0);
903 }
904
905 /**
906 * All glTexSubImage calls go through this function.
907 */
/**
 * All glTexSubImage calls go through this function.
 *
 * Unlike radeon_teximage() this never (re)allocates storage — it flushes
 * pending GPU work referencing the buffer, validates PBO unpack state,
 * and updates the requested subregion in place.
 */
static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage,
			_mesa_tex_target_to_face(target), level);
	{
		/* CPU write below would race with queued GPU work. */
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling texsubimage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			xoffset, yoffset, zoffset,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
961
962 void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
963 GLint xoffset,
964 GLsizei width,
965 GLenum format, GLenum type,
966 const GLvoid * pixels,
967 const struct gl_pixelstore_attrib *packing,
968 struct gl_texture_object *texObj,
969 struct gl_texture_image *texImage)
970 {
971 radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
972 format, type, pixels, packing, texObj, texImage, 0);
973 }
974
975 void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
976 GLint xoffset, GLint yoffset,
977 GLsizei width, GLsizei height,
978 GLenum format, GLenum type,
979 const GLvoid * pixels,
980 const struct gl_pixelstore_attrib *packing,
981 struct gl_texture_object *texObj,
982 struct gl_texture_image *texImage)
983 {
984 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
985 0, format, type, pixels, packing, texObj, texImage,
986 0);
987 }
988
989 void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
990 GLint level, GLint xoffset,
991 GLint yoffset, GLsizei width,
992 GLsizei height, GLenum format,
993 GLsizei imageSize, const GLvoid * data,
994 struct gl_texture_object *texObj,
995 struct gl_texture_image *texImage)
996 {
997 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
998 imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
999 }
1000
1001
1002 void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
1003 GLint xoffset, GLint yoffset, GLint zoffset,
1004 GLsizei width, GLsizei height, GLsizei depth,
1005 GLenum format, GLenum type,
1006 const GLvoid * pixels,
1007 const struct gl_pixelstore_attrib *packing,
1008 struct gl_texture_object *texObj,
1009 struct gl_texture_image *texImage)
1010 {
1011 radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
1012 format, type, pixels, packing, texObj, texImage, 0);
1013 }
1014
1015 unsigned radeonIsFormatRenderable(gl_format mesa_format)
1016 {
1017 if (mesa_format == _dri_texformat_argb8888 || mesa_format == _dri_texformat_rgb565 ||
1018 mesa_format == _dri_texformat_argb1555 || mesa_format == _dri_texformat_argb4444)
1019 return 1;
1020
1021 switch (mesa_format)
1022 {
1023 case MESA_FORMAT_Z16:
1024 case MESA_FORMAT_S8_Z24:
1025 return 1;
1026 default:
1027 return 0;
1028 }
1029 }