/*
 * Copyright (C) 2009 Maciej Cencora.
 * Copyright (C) 2008 Nicolai Haehnle.
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "main/glheader.h"
#include "main/imports.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/mipmap.h"
#include "main/texcompress.h"
#include "main/texstore.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "drivers/common/meta.h"

#include "xmlpool.h"		/* for symbolic values of enum-type options */

#include "radeon_common.h"

#include "radeon_mipmap_tree.h"


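/**
 * Copy a rectangle of rows between two (possibly differently strided)
 * linear buffers.  When both strides equal the row size, the copy
 * collapses into a single memcpy of the whole block.
 */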
void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
	       GLuint numrows, GLuint rowsize)
{
	assert(rowsize <= dststride);
	assert(rowsize <= srcstride);

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s dst %p, stride %u, src %p, stride %u, "
		     "numrows %u, rowsize %u.\n",
		     __func__, dst, dststride,
		     src, srcstride,
		     numrows, rowsize);

	if (rowsize == srcstride && rowsize == dststride) {
		memcpy(dst, src, numrows*rowsize);
	} else {
		GLuint i;
		for(i = 0; i < numrows; ++i) {
			memcpy(dst, src, rowsize);
			dst += dststride;
			src += srcstride;
		}
	}
}

/* textures */
/**
 * Allocate an empty texture image object.
 */
struct gl_texture_image *radeonNewTextureImage(struct gl_context *ctx)
{
	return CALLOC(sizeof(radeon_texture_image));
}

/**
 * Free memory associated with this texture image.
 */
void radeonFreeTexImageData(struct gl_context *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		radeon_miptree_unreference(&image->mt);
		assert(!image->base.Data);
	} else {
		_mesa_free_texture_image_data(ctx, timage);
	}
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	if (timage->Data) {
		_mesa_free_texmemory(timage->Data);
		timage->Data = NULL;
	}
}

/* Set Data pointer and additional data for mapped texture image */
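/* Note: gl_texture_image::RowStride is expressed in texels rather than
 * bytes, which is why the miptree's byte row stride is divided by the
 * texel size below.
 */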
static void teximage_set_map_data(radeon_texture_image *image)
{
	radeon_mipmap_level *lvl;

	if (!image->mt) {
		radeon_warning("%s(%p) Trying to set map data without miptree.\n",
			       __func__, image);

		return;
	}

	lvl = &image->mt->levels[image->mtlevel];

	image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
	image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.TexFormat);
}


/**
 * Map a single texture image for glTexImage and friends.
 */
void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
{
	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(img %p), write_enable %s.\n",
		     __func__, image,
		     write_enable ? "true" : "false");
	if (image->mt) {
		assert(!image->base.Data);

		radeon_bo_map(image->mt->bo, write_enable);
		teximage_set_map_data(image);
	}
}


void radeon_teximage_unmap(radeon_texture_image *image)
{
	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(img %p)\n",
		     __func__, image);
	if (image->mt) {
		assert(image->base.Data);

		image->base.Data = 0;
		radeon_bo_unmap(image->mt->bo);
	}
}

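/* Helpers for textures whose storage has been overridden with an external
 * BO (t->image_override / t->bo), e.g. by the texture-from-pixmap path on
 * r100-class hardware where no miptree is built: the BO is mapped and its
 * pointer exposed directly as the base image's Data.
 */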
static void map_override(struct gl_context *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_map(t->bo, GL_FALSE);

	img->base.Data = t->bo->ptr;
}

static void unmap_override(struct gl_context *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_unmap(t->bo);

	img->base.Data = NULL;
}

/**
 * Map a validated texture for reading during software rendering.
 */
void radeonMapTexture(struct gl_context *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(%p, tex %p)\n",
		     __func__, ctx, texObj);

	if (!radeon_validate_texture_miptree(ctx, texObj)) {
		radeon_error("%s(%p, tex %p) Failed to validate miptree for "
			     "sw fallback.\n",
			     __func__, ctx, texObj);
		return;
	}

	if (t->image_override && t->bo) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			     "%s(%p, tex %p) Workaround for missing miptree on r100.\n",
			     __func__, ctx, texObj);

		map_override(ctx, t);
	}

	/* r100 3D sw fallbacks don't have a miptree */
	if (!t->mt) {
		radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
			       __func__, ctx, texObj);
		return;
	}

	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}

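/**
 * Unmap a texture previously mapped for software rendering; counterpart to
 * radeonMapTexture().
 */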
void radeonUnmapTexture(struct gl_context *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(%p, tex %p)\n",
		     __func__, ctx, texObj);

	if (t->image_override && t->bo)
		unmap_override(ctx, t);
	/* r100 3D sw fallbacks don't have a miptree */
	if (!t->mt)
		return;

	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			texObj->Image[face][level]->Data = 0;
	}
	radeon_bo_unmap(t->mt->bo);
}

/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(struct gl_context *ctx, GLenum target,
				   struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		     "%s(%p, tex %p) Target type %s.\n",
		     __func__, ctx, texObj,
		     _mesa_lookup_enum_by_nr(target));

	_mesa_generate_mipmap(ctx, target, texObj);

	for (face = 0; face < nr_faces; face++) {
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(&image->mt);
		}
	}

}

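/**
 * Driver hook for glGenerateMipmap(EXT).  If the base image's BO is still
 * referenced by the command stream, pending rendering is flushed first;
 * then either the mapped software path above or the common meta
 * implementation is used, depending on
 * _mesa_meta_check_generate_mipmap_fallback().
 */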
void radeonGenerateMipmap(struct gl_context* ctx, GLenum target, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct radeon_bo *bo;
	GLuint face = _mesa_tex_target_to_face(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s(%p, target %s, tex %p)\n",
		     __func__, ctx, _mesa_lookup_enum_by_nr(target),
		     texObj);

	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			     "%s(%p, tex %p) Trying to generate mipmap for texture "
			     "in processing by GPU.\n",
			     __func__, ctx, texObj);
		radeon_firevertices(rmesa);
	}

	if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
		radeon_teximage_map(baseimage, GL_FALSE);
		radeon_generate_mipmap(ctx, target, texObj);
		radeon_teximage_unmap(baseimage);
	} else {
		_mesa_meta_GenerateMipmap(ctx, target, texObj);
	}
}


/* try to find a format which will only need a memcpy */
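/* The ui/littleEndian pair below is a small run-time probe of host byte
 * order: reading the first byte of the GLuint 1 yields 1 only on a
 * little-endian host.  Together with the source format/type it decides
 * whether the packed 8888 layout already matches a hardware format.
 */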
static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
					   GLenum srcFormat,
					   GLenum srcType, GLboolean fbo)
{
	const GLuint ui = 1;
	const GLubyte littleEndian = *((const GLubyte *)&ui);

	/* r100 can only do this */
	if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
		return _dri_texformat_argb8888;

	if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
	    (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
		return MESA_FORMAT_RGBA8888;
	} else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
		   (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
		return MESA_FORMAT_RGBA8888_REV;
	} else if (IS_R200_CLASS(rmesa->radeonScreen)) {
		return _dri_texformat_argb8888;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8)) {
		return MESA_FORMAT_ARGB8888_REV;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
		return MESA_FORMAT_ARGB8888;
	} else
		return _dri_texformat_argb8888;
}

gl_format radeonChooseTextureFormat_mesa(struct gl_context * ctx,
					 GLint internalFormat,
					 GLenum format,
					 GLenum type)
{
	return radeonChooseTextureFormat(ctx, internalFormat, format,
					 type, 0);
}

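/* The choice between 16 and 32 bits per texel is driven by the driconf
 * "texture_depth" option (DRI_CONF_TEXTURE_DEPTH_*); force16bpt overrides
 * even explicitly sized 32-bit internal formats.
 */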
gl_format radeonChooseTextureFormat(struct gl_context * ctx,
				    GLint internalFormat,
				    GLenum format,
				    GLenum type, GLboolean fbo)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	const GLboolean do32bpt =
		(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
		(rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s InternalFormat=%s(%d) type=%s format=%s\n",
		     __func__,
		     _mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		     _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s do32bpt=%d force16bpt=%d\n",
		     __func__, do32bpt, force16bpt);

	switch (internalFormat) {
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
				_dri_texformat_argb4444;
		}

	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_argb8888 :
				_dri_texformat_rgb565;
		}

	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		return !force16bpt ?
			radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			_dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_argb8888 :
			_dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
		/* r200: can't use the a8 format since interpreting hw I8 as a8 would
		   give wrong rgb values (equal to the alpha value instead of 0). */
		if (IS_R200_CLASS(rmesa->radeonScreen))
			return _dri_texformat_al88;
		else
			return _dri_texformat_a8;
	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return MESA_FORMAT_YCBCR;
		else
			return MESA_FORMAT_YCBCR_REV;

	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGB_DXT1;

	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGBA_DXT1;

	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return MESA_FORMAT_RGBA_DXT3;

	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return MESA_FORMAT_RGBA_DXT5;

	case GL_ALPHA16F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT16;
	case GL_ALPHA32F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT32;
	case GL_LUMINANCE16F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT16;
	case GL_LUMINANCE32F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT32;
	case GL_LUMINANCE_ALPHA16F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
	case GL_LUMINANCE_ALPHA32F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
	case GL_INTENSITY16F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT16;
	case GL_INTENSITY32F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT32;
	case GL_RGB16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGB32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;
	case GL_RGBA16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGBA32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;

#ifdef RADEON_R300
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
		return MESA_FORMAT_Z16;
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
			return MESA_FORMAT_S8_Z24;
		else
			return MESA_FORMAT_Z16;
#else
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		return MESA_FORMAT_S8_Z24;
#endif

	/* EXT_texture_sRGB */
	case GL_SRGB:
	case GL_SRGB8:
	case GL_SRGB_ALPHA:
	case GL_SRGB8_ALPHA8:
	case GL_COMPRESSED_SRGB:
	case GL_COMPRESSED_SRGB_ALPHA:
		return MESA_FORMAT_SARGB8;

	case GL_SLUMINANCE:
	case GL_SLUMINANCE8:
	case GL_COMPRESSED_SLUMINANCE:
		return MESA_FORMAT_SL8;

	case GL_SLUMINANCE_ALPHA:
	case GL_SLUMINANCE8_ALPHA8:
	case GL_COMPRESSED_SLUMINANCE_ALPHA:
		return MESA_FORMAT_SLA8;

	case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGB_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGBA_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
		return MESA_FORMAT_SRGBA_DXT3;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
		return MESA_FORMAT_SRGBA_DXT5;

	default:
		_mesa_problem(ctx,
			      "unexpected internalFormat 0x%x in %s",
			      (int)internalFormat, __func__);
		return MESA_FORMAT_NONE;
	}

	return MESA_FORMAT_NONE;	/* never get here */
}

/** Check if given image is valid within current texture object.
 */
static int image_matches_texture_obj(struct gl_texture_object *texObj,
				     struct gl_texture_image *texImage,
				     unsigned level)
{
	const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];

	if (!baseImage)
		return 0;

	if (level < texObj->BaseLevel || level > texObj->MaxLevel)
		return 0;

	const unsigned levelDiff = level - texObj->BaseLevel;
	const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
	const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
	const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);

	return (texImage->Width == refWidth &&
		texImage->Height == refHeight &&
		texImage->Depth == refDepth);
}

static void teximage_assign_miptree(radeonContextPtr rmesa,
				    struct gl_texture_object *texObj,
				    struct gl_texture_image *texImage,
				    unsigned face,
				    unsigned level)
{
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	/* Since the miptree holds only images for levels [BaseLevel..MaxLevel],
	 * don't allocate the miptree if the teximage won't fit.
	 */
	if (!image_matches_texture_obj(texObj, texImage, level))
		return;

	/* Try using current miptree, or create new if there isn't any */
	if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		radeon_miptree_unreference(&t->mt);
		radeon_try_alloc_miptree(rmesa, t);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			     "%s: texObj %p, texImage %p, face %d, level %d, "
			     "texObj miptree doesn't match, allocated new miptree %p\n",
			     __func__, texObj, texImage, face, level, t->mt);
	}

	/* Miptree allocation may have failed,
	 * when there was no image for the base level specified */
	if (t->mt) {
		image->mtface = face;
		image->mtlevel = level;
		radeon_miptree_reference(t->mt, &image->mt);
	} else
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			     "%s Failed to allocate miptree.\n", __func__);
}

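/* Build the per-slice offset table (in texels) that _mesa_texstore expects
 * for 3D uploads: slice i starts alignedWidth * height * i texels into the
 * destination.
 */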
static GLuint * allocate_image_offsets(struct gl_context *ctx,
				       unsigned alignedWidth,
				       unsigned height,
				       unsigned depth)
{
	int i;
	GLuint *offsets;

	offsets = malloc(depth * sizeof(GLuint));
	if (!offsets) {
		_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
		return NULL;
	}

	for (i = 0; i < depth; ++i) {
		offsets[i] = alignedWidth * height * i;
	}

	return offsets;
}

/**
 * Update a subregion of the given texture image.
 */
static void radeon_store_teximage(struct gl_context* ctx, int dims,
				  GLint xoffset, GLint yoffset, GLint zoffset,
				  GLsizei width, GLsizei height, GLsizei depth,
				  GLsizei imageSize,
				  GLenum format, GLenum type,
				  const GLvoid * pixels,
				  const struct gl_pixelstore_attrib *packing,
				  struct gl_texture_object *texObj,
				  struct gl_texture_image *texImage,
				  int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	GLuint dstRowStride;
	GLuint *dstImageOffsets;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		     "%s(%p, tex %p, image %p) compressed %d\n",
		     __func__, ctx, texObj, texImage, compressed);

	if (image->mt) {
		dstRowStride = image->mt->levels[image->mtlevel].rowstride;
	} else if (t->bo) {
		/* TFP case */
		dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
	} else {
		dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
	}

	assert(dstRowStride);

	if (dims == 3) {
		unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
		dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
		if (!dstImageOffsets) {
			radeon_warning("%s Failed to allocate dstImageOffsets.\n", __func__);
			return;
		}
	} else {
		dstImageOffsets = texImage->ImageOffsets;
	}

	radeon_teximage_map(image, GL_TRUE);

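	/* For compressed formats the data is copied in whole blocks: the
	 * destination start is computed in block units from xoffset/yoffset
	 * and the format's block size, and one "row" below is a row of
	 * blocks rather than a row of pixels.
	 */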
	if (compressed) {
		uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
		GLubyte *img_start;

		_mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);

		if (!image->mt) {
			dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
			img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
								   texImage->TexFormat,
								   texImage->Width, texImage->Data);
		}
		else {
			uint32_t offset;
			offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
			offset *= _mesa_get_format_bytes(texImage->TexFormat);
			img_start = texImage->Data + offset;
		}
		srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
		bytesPerRow = srcRowStride;
		rows = (height + block_height - 1) / block_height;

		copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
	}
	else {
		if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
				    texImage->TexFormat, texImage->Data,
				    xoffset, yoffset, zoffset,
				    dstRowStride,
				    dstImageOffsets,
				    width, height, depth,
				    format, type, pixels, packing)) {
			_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
		}
	}

	if (dims == 3) {
		free(dstImageOffsets);
	}

	radeon_teximage_unmap(image);
}

/**
 * All glTexImage calls go through this function.
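 *
 * Any pending GPU work that still references the image's buffer is flushed,
 * the old storage is released, and the image is (re)bound either to the
 * object's miptree or, when no miptree can be used, to plain memory
 * allocated with _mesa_alloc_texmemory().  The pixel data is then written
 * through radeon_store_teximage().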
 */
static void radeon_teximage(
	struct gl_context *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
		     "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
		     __func__, dims, texObj, texImage, face, level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				     "%s Calling teximage for texture that is "
				     "queued for GPU processing.\n",
				     __func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			int size = _mesa_format_image_size(texImage->TexFormat,
							   texImage->Width,
							   texImage->Height,
							   texImage->Depth);
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				     "%s %dd: texObj %p, texImage %p, "
				     " no miptree assigned, using local memory %p\n",
				     __func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
				      0, 0, 0,
				      width, height, depth,
				      imageSize, format, type,
				      pixels, packing,
				      texObj, texImage,
				      compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}

void radeonTexImage1D(struct gl_context * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
			0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexImage2D(struct gl_context * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint height, GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
			0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonCompressedTexImage2D(struct gl_context * ctx, GLenum target,
				GLint level, GLint internalFormat,
				GLint width, GLint height, GLint border,
				GLsizei imageSize, const GLvoid * data,
				struct gl_texture_object *texObj,
				struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
			imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
}

void radeonTexImage3D(struct gl_context * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint height, GLint depth,
		      GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
			0, format, type, pixels, packing, texObj, texImage, 0);
}

/**
 * All glTexSubImage calls go through this function.
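 *
 * Unlike radeon_teximage() this never reallocates storage; it only flushes
 * pending GPU work that references the image's buffer and then writes the
 * subregion through radeon_store_teximage().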
 */
static void radeon_texsubimage(struct gl_context* ctx, int dims, GLenum target, int level,
			       GLint xoffset, GLint yoffset, GLint zoffset,
			       GLsizei width, GLsizei height, GLsizei depth,
			       GLsizei imageSize,
			       GLenum format, GLenum type,
			       const GLvoid * pixels,
			       const struct gl_pixelstore_attrib *packing,
			       struct gl_texture_object *texObj,
			       struct gl_texture_image *texImage,
			       int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
		     "%s %dd: texObj %p, texImage %p, face %d, level %d\n",
		     __func__, dims, texObj, texImage,
		     _mesa_tex_target_to_face(target), level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				     "%s Calling texsubimage for texture that is "
				     "queued for GPU processing.\n",
				     __func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
				      xoffset, yoffset, zoffset,
				      width, height, depth,
				      imageSize, format, type,
				      pixels, packing,
				      texObj, texImage,
				      compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}

void radeonTexSubImage1D(struct gl_context * ctx, GLenum target, GLint level,
			 GLint xoffset,
			 GLsizei width,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
			   format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexSubImage2D(struct gl_context * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset,
			 GLsizei width, GLsizei height,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
			   0, format, type, pixels, packing, texObj, texImage,
			   0);
}

void radeonCompressedTexSubImage2D(struct gl_context * ctx, GLenum target,
				   GLint level, GLint xoffset,
				   GLint yoffset, GLsizei width,
				   GLsizei height, GLenum format,
				   GLsizei imageSize, const GLvoid * data,
				   struct gl_texture_object *texObj,
				   struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
			   imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
}


void radeonTexSubImage3D(struct gl_context * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset, GLint zoffset,
			 GLsizei width, GLsizei height, GLsizei depth,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
			   format, type, pixels, packing, texObj, texImage, 0);
}

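/**
 * Report whether a hardware texture format can also be rendered to.
 * Presumably queried when validating FBO/renderbuffer attachments; only
 * the plain RGB(A) formats and the depth/stencil formats qualify.
 */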
unsigned radeonIsFormatRenderable(gl_format mesa_format)
{
	if (mesa_format == _dri_texformat_argb8888 || mesa_format == _dri_texformat_rgb565 ||
	    mesa_format == _dri_texformat_argb1555 || mesa_format == _dri_texformat_argb4444)
		return 1;

	switch (mesa_format)
	{
	case MESA_FORMAT_Z16:
	case MESA_FORMAT_S8_Z24:
		return 1;
	default:
		return 0;
	}
}