6a8e70d47e6631f14d13d989496feb2be523c4d5
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_texture.c
1 /*
2 * Copyright (C) 2009 Maciej Cencora.
3 * Copyright (C) 2008 Nicolai Haehnle.
4 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
5 *
6 * The Weather Channel (TM) funded Tungsten Graphics to develop the
7 * initial release of the Radeon 8500 driver under the XFree86 license.
8 * This notice must be preserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining
11 * a copy of this software and associated documentation files (the
12 * "Software"), to deal in the Software without restriction, including
13 * without limitation the rights to use, copy, modify, merge, publish,
14 * distribute, sublicense, and/or sell copies of the Software, and to
15 * permit persons to whom the Software is furnished to do so, subject to
16 * the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the
19 * next paragraph) shall be included in all copies or substantial
20 * portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
26 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
27 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
28 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 *
30 */
31
32 #include "main/glheader.h"
33 #include "main/imports.h"
34 #include "main/context.h"
35 #include "main/convolve.h"
36 #include "main/enums.h"
37 #include "main/mipmap.h"
38 #include "main/texcompress.h"
39 #include "main/texstore.h"
40 #include "main/teximage.h"
41 #include "main/texobj.h"
42
43 #include "xmlpool.h" /* for symbolic values of enum-type options */
44
45 #include "radeon_common.h"
46
47 #include "radeon_mipmap_tree.h"
48
49
50 void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
51 GLuint numrows, GLuint rowsize)
52 {
53 assert(rowsize <= dststride);
54 assert(rowsize <= srcstride);
55
56 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
57 "%s dst %p, stride %u, src %p, stride %u, "
58 "numrows %u, rowsize %u.\n",
59 __func__, dst, dststride,
60 src, srcstride,
61 numrows, rowsize);
62
63 if (rowsize == srcstride && rowsize == dststride) {
64 memcpy(dst, src, numrows*rowsize);
65 } else {
66 GLuint i;
67 for(i = 0; i < numrows; ++i) {
68 memcpy(dst, src, rowsize);
69 dst += dststride;
70 src += srcstride;
71 }
72 }
73 }
74
75 /* textures */
76 /**
77 * Allocate an empty texture image object.
78 */
/**
 * Allocate an empty, zero-initialized texture image object.
 * Returns the driver-private radeon_texture_image via its embedded
 * gl_texture_image base (first member), or NULL on allocation failure.
 */
struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
{
	return CALLOC(sizeof(radeon_texture_image));
}
83
84 /**
85 * Free memory associated with this texture image.
86 */
/**
 * Free memory associated with this texture image.
 * Releases, in order: the miptree reference (if the image lives in a miptree,
 * base.Data must not also be set), otherwise the Mesa-owned image data; then
 * the standalone BO (TFP/override case); finally any leftover Data pointer.
 */
void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		/* Image storage lives in the miptree; drop our reference.
		 * A mapped image would still have base.Data set, which must
		 * never be the case here. */
		radeon_miptree_unreference(&image->mt);
		assert(!image->base.Data);
	} else {
		_mesa_free_texture_image_data(ctx, timage);
	}
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	if (timage->Data) {
		_mesa_free_texmemory(timage->Data);
		timage->Data = NULL;
	}
}
106
107 /* Set Data pointer and additional data for mapped texture image */
/* Set Data pointer and additional data for mapped texture image.
 * Computes the CPU-visible address of this image inside the (already mapped)
 * miptree BO and derives RowStride in texels from the level's byte pitch.
 * No-op (with a warning) if the image has no miptree. */
static void teximage_set_map_data(radeon_texture_image *image)
{
	radeon_mipmap_level *lvl;

	if (!image->mt) {
		radeon_warning("%s(%p) Trying to set map data without miptree.\n",
				__func__, image);

		return;
	}

	lvl = &image->mt->levels[image->mtlevel];

	/* bo->ptr is valid only while the BO is mapped (see radeon_teximage_map). */
	image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
	image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.TexFormat);
}
124
125
126 /**
127 * Map a single texture image for glTexImage and friends.
128 */
/**
 * Map a single texture image for glTexImage and friends.
 * Maps the backing miptree BO (read-only unless write_enable) and fills in
 * base.Data/RowStride.  Does nothing when the image has no miptree.
 */
void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
{
	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(img %p), write_enable %s.\n",
			__func__, image,
			write_enable ? "true": "false");
	if (image->mt) {
		/* Mapping twice would indicate unbalanced map/unmap calls. */
		assert(!image->base.Data);

		radeon_bo_map(image->mt->bo, write_enable);
		teximage_set_map_data(image);
	}
}
142
143
144 void radeon_teximage_unmap(radeon_texture_image *image)
145 {
146 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
147 "%s(img %p)\n",
148 __func__, image);
149 if (image->mt) {
150 assert(image->base.Data);
151
152 image->base.Data = 0;
153 radeon_bo_unmap(image->mt->bo);
154 }
155 }
156
/* Map a texture that uses an overridden BO instead of a miptree
 * (texture-from-pixmap / r100 workaround).  Points the base image's Data at
 * the start of the BO. */
static void map_override(GLcontext *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_map(t->bo, GL_FALSE);

	img->base.Data = t->bo->ptr;
}
165
/* Undo map_override(): unmap the override BO and clear the base image's
 * Data pointer. */
static void unmap_override(GLcontext *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_unmap(t->bo);

	img->base.Data = NULL;
}
174
175 /**
176 * Map a validated texture for reading during software rendering.
177 */
/**
 * Map a validated texture for reading during software rendering.
 * Validates the miptree first; handles the image-override (no-miptree) r100
 * case separately, then maps the miptree BO once and sets up Data pointers
 * for every face/level in the minLod..maxLod range.
 */
void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p)\n",
			__func__, ctx, texObj);

	if (!radeon_validate_texture_miptree(ctx, texObj)) {
		radeon_error("%s(%p, tex %p) Failed to validate miptree for "
			"sw fallback.\n",
			__func__, ctx, texObj);
		return;
	}

	if (t->image_override && t->bo) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Work around for missing miptree in r100.\n",
			__func__, ctx, texObj);

		map_override(ctx, t);
	}

	/* for r100 3D sw fallbacks don't have mt */
	if (!t->mt) {
		radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
			__func__, ctx, texObj);
		return;
	}

	/* Single map of the shared BO covers all faces/levels below. */
	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}
215
216 void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
217 {
218 radeonTexObj* t = radeon_tex_obj(texObj);
219 int face, level;
220
221 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
222 "%s(%p, tex %p)\n",
223 __func__, ctx, texObj);
224
225 if (t->image_override && t->bo)
226 unmap_override(ctx, t);
227 /* for r100 3D sw fallbacks don't have mt */
228 if (!t->mt)
229 return;
230
231 for(face = 0; face < t->mt->faces; ++face) {
232 for(level = t->minLod; level <= t->maxLod; ++level)
233 texObj->Image[face][level]->Data = 0;
234 }
235 radeon_bo_unmap(t->mt->bo);
236 }
237
238 /**
239 * Wraps Mesa's implementation to ensure that the base level image is mapped.
240 *
241 * This relies on internal details of _mesa_generate_mipmap, in particular
242 * the fact that the memory for recreated texture images is always freed.
243 */
/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 * After generation, each recreated image is re-tagged with its face/level
 * and its (stale) miptree reference is dropped so the images live in
 * Mesa-owned memory until the next miptree validation.
 */
static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
				   struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Target type %s.\n",
			__func__, ctx, texObj,
			_mesa_lookup_enum_by_nr(target));

	_mesa_generate_mipmap(ctx, target, texObj);

	for (face = 0; face < nr_faces; face++) {
		/* NOTE(review): the bound is `i < MaxLevel`, so the image at
		 * MaxLevel itself is not re-tagged/unreferenced here —
		 * confirm this off-by-one is intended. */
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(&image->mt);
		}
	}

}
275
/**
 * Driver entry point for glGenerateMipmap.
 * Flushes pending GPU work if the base image's BO is still referenced by the
 * command stream, then maps the base image, runs the mipmap generation
 * wrapper, and unmaps.
 */
void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct radeon_bo *bo;
	GLuint face = _mesa_tex_target_to_face(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
	/* The backing BO is either the standalone image BO or the miptree's. */
	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, target %s, tex %p)\n",
		__func__, ctx, _mesa_lookup_enum_by_nr(target),
		texObj);

	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s(%p, tex %p) Trying to generate mipmap for texture "
			"in processing by GPU.\n",
			__func__, ctx, texObj);
		radeon_firevertices(rmesa);
	}

	radeon_teximage_map(baseimage, GL_FALSE);
	radeon_generate_mipmap(ctx, target, texObj);
	radeon_teximage_unmap(baseimage);
}
301
302
303 /* try to find a format which will only need a memcopy */
/* try to find a format which will only need a memcopy */
/* Given a 32bpp source format/type pair, pick the hardware texture format
 * whose in-memory layout matches the source on this host's endianness, so
 * upload degenerates to a straight copy.  Branch order matters: the R200
 * check must come before the BGRA cases (R200 falls back to argb8888). */
static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
					   GLenum srcFormat,
					   GLenum srcType, GLboolean fbo)
{
	/* Runtime endianness probe: first byte of a 1-valued uint is nonzero
	 * only on little-endian hosts. */
	const GLuint ui = 1;
	const GLubyte littleEndian = *((const GLubyte *)&ui);

	/* r100 can only do this */
	if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
		return _dri_texformat_argb8888;

	if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
	    (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
		return MESA_FORMAT_RGBA8888;
	} else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
		   (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
		return MESA_FORMAT_RGBA8888_REV;
	} else if (IS_R200_CLASS(rmesa->radeonScreen)) {
		return _dri_texformat_argb8888;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8)) {
		return MESA_FORMAT_ARGB8888_REV;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
		return MESA_FORMAT_ARGB8888;
	} else
		return _dri_texformat_argb8888;
}
336
/**
 * Mesa ChooseTextureFormat hook: delegates to radeonChooseTextureFormat()
 * with fbo = 0 (texture, not renderbuffer, path).
 */
gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx,
					 GLint internalFormat,
					 GLenum format,
					 GLenum type)
{
	return radeonChooseTextureFormat(ctx, internalFormat, format,
					 type, 0);
}
345
/**
 * Map a GL internalFormat (plus user format/type and driconf texture depth
 * preferences) to the hardware gl_format used for storage.
 * do32bpt/force16bpt come from the driconf "texture_depth" option and bias
 * the generic RGBA/RGB cases toward 32- or 16-bit formats.
 */
gl_format radeonChooseTextureFormat(GLcontext * ctx,
				    GLint internalFormat,
				    GLenum format,
				    GLenum type, GLboolean fbo)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	const GLboolean do32bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s InternalFormat=%s(%d) type=%s format=%s\n",
		__func__,
		_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s do32bpt=%d force16bpt=%d\n",
			__func__, do32bpt, force16bpt);

	switch (internalFormat) {
	/* Generic RGBA: honor packed source types, else driconf preference. */
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
			    _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			    _dri_texformat_argb4444;
		}

	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_argb8888 :
			    _dri_texformat_rgb565;
		}

	/* Sized RGBA: downgrade to 16bpp only when forced. */
	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		return !force16bpt ?
			radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			_dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_argb8888 :
		    _dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
		/* r200: can't use a8 format since interpreting hw I8 as a8 would result
		   in wrong rgb values (same as alpha value instead of 0). */
		if (IS_R200_CLASS(rmesa->radeonScreen))
			return _dri_texformat_al88;
		else
			return _dri_texformat_a8;
	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return MESA_FORMAT_YCBCR;
		else
			return MESA_FORMAT_YCBCR_REV;

	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGB_DXT1;

	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGBA_DXT1;

	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return MESA_FORMAT_RGBA_DXT3;

	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return MESA_FORMAT_RGBA_DXT5;

	case GL_ALPHA16F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT16;
	case GL_ALPHA32F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT32;
	case GL_LUMINANCE16F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT16;
	case GL_LUMINANCE32F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT32;
	case GL_LUMINANCE_ALPHA16F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
	case GL_LUMINANCE_ALPHA32F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
	case GL_INTENSITY16F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT16;
	case GL_INTENSITY32F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT32;
	/* NOTE(review): RGB float maps to the RGBA float formats here —
	 * presumably because no RGB-only float hw format exists; confirm. */
	case GL_RGB16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGB32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;
	case GL_RGBA16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGBA32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;

#ifdef RADEON_R300
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
		return MESA_FORMAT_Z16;
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
			return MESA_FORMAT_S8_Z24;
		else
			return MESA_FORMAT_Z16;
#else
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		return MESA_FORMAT_S8_Z24;
#endif

	/* EXT_texture_sRGB */
	case GL_SRGB:
	case GL_SRGB8:
	case GL_SRGB_ALPHA:
	case GL_SRGB8_ALPHA8:
	case GL_COMPRESSED_SRGB:
	case GL_COMPRESSED_SRGB_ALPHA:
		return MESA_FORMAT_SRGBA8;

	case GL_SLUMINANCE:
	case GL_SLUMINANCE8:
	case GL_COMPRESSED_SLUMINANCE:
		return MESA_FORMAT_SL8;

	case GL_SLUMINANCE_ALPHA:
	case GL_SLUMINANCE8_ALPHA8:
	case GL_COMPRESSED_SLUMINANCE_ALPHA:
		return MESA_FORMAT_SLA8;

	default:
		_mesa_problem(ctx,
			      "unexpected internalFormat 0x%x in %s",
			      (int)internalFormat, __func__);
		return MESA_FORMAT_NONE;
	}

	return MESA_FORMAT_NONE;	/* never get here */
}
570
571 /** Check if given image is valid within current texture object.
572 */
573 static int image_matches_texture_obj(struct gl_texture_object *texObj,
574 struct gl_texture_image *texImage,
575 unsigned level)
576 {
577 const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];
578
579 if (!baseImage)
580 return 0;
581
582 if (level < texObj->BaseLevel || level > texObj->MaxLevel)
583 return 0;
584
585 const unsigned levelDiff = level - texObj->BaseLevel;
586 const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
587 const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
588 const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);
589
590 return (texImage->Width == refWidth &&
591 texImage->Height == refHeight &&
592 texImage->Depth == refDepth);
593 }
594
/* Attach a miptree to the given teximage: reuse the texture object's current
 * miptree when it matches, otherwise drop it and try to allocate a new one,
 * then reference it from the image.  Silently leaves the image without a
 * miptree when allocation fails or the image doesn't fit the object. */
static void teximage_assign_miptree(radeonContextPtr rmesa,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		unsigned face,
		unsigned level)
{
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	/* Since miptree holds only images for levels <BaseLevel..MaxLevel>
	 * don't allocate the miptree if the teximage won't fit.
	 */
	if (!image_matches_texture_obj(texObj, texImage, level))
		return;

	/* Try using current miptree, or create new if there isn't any */
	if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		radeon_miptree_unreference(&t->mt);
		radeon_try_alloc_miptree(rmesa, t);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
				"%s: texObj %p, texImage %p, face %d, level %d, "
				"texObj miptree doesn't match, allocated new miptree %p\n",
				__FUNCTION__, texObj, texImage, face, level, t->mt);
	}

	/* Miptree alocation may have failed,
	 * when there was no image for baselevel specified */
	if (t->mt) {
		image->mtface = face;
		image->mtlevel = level;
		radeon_miptree_reference(t->mt, &image->mt);
	} else
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Failed to allocate miptree.\n", __func__);
}
630
631 static GLuint * allocate_image_offsets(GLcontext *ctx,
632 unsigned alignedWidth,
633 unsigned height,
634 unsigned depth)
635 {
636 int i;
637 GLuint *offsets;
638
639 offsets = malloc(depth * sizeof(GLuint)) ;
640 if (!offsets) {
641 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
642 return NULL;
643 }
644
645 for (i = 0; i < depth; ++i) {
646 offsets[i] = alignedWidth * height * i;
647 }
648
649 return offsets;
650 }
651
652 /**
653 * Update a subregion of the given texture image.
654 */
655 static void radeon_store_teximage(GLcontext* ctx, int dims,
656 GLint xoffset, GLint yoffset, GLint zoffset,
657 GLsizei width, GLsizei height, GLsizei depth,
658 GLsizei imageSize,
659 GLenum format, GLenum type,
660 const GLvoid * pixels,
661 const struct gl_pixelstore_attrib *packing,
662 struct gl_texture_object *texObj,
663 struct gl_texture_image *texImage,
664 int compressed)
665 {
666 radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
667 radeonTexObj *t = radeon_tex_obj(texObj);
668 radeon_texture_image* image = get_radeon_texture_image(texImage);
669
670 GLuint dstRowStride;
671 GLuint *dstImageOffsets;
672
673 radeon_print(RADEON_TEXTURE, RADEON_TRACE,
674 "%s(%p, tex %p, image %p) compressed %d\n",
675 __func__, ctx, texObj, texImage, compressed);
676
677 if (image->mt) {
678 dstRowStride = image->mt->levels[image->mtlevel].rowstride;
679 } else if (t->bo) {
680 /* TFP case */
681 dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
682 } else {
683 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
684 }
685
686 assert(dstRowStride);
687
688 if (dims == 3) {
689 unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
690 dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
691 if (!dstImageOffsets) {
692 radeon_warning("%s Failed to allocate dstImaeOffset.\n", __func__);
693 return;
694 }
695 } else {
696 dstImageOffsets = texImage->ImageOffsets;
697 }
698
699 radeon_teximage_map(image, GL_TRUE);
700
701 if (compressed) {
702 uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
703 GLubyte *img_start;
704
705 _mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);
706
707 if (!image->mt) {
708 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
709 img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
710 texImage->TexFormat,
711 texImage->Width, texImage->Data);
712 }
713 else {
714 uint32_t offset;
715 offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
716 offset *= _mesa_get_format_bytes(texImage->TexFormat);
717 img_start = texImage->Data + offset;
718 }
719 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
720 bytesPerRow = srcRowStride;
721 rows = (height + block_height - 1) / block_height;
722
723 copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
724 }
725 else {
726 if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
727 texImage->TexFormat, texImage->Data,
728 xoffset, yoffset, zoffset,
729 dstRowStride,
730 dstImageOffsets,
731 width, height, depth,
732 format, type, pixels, packing)) {
733 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
734 }
735 }
736
737 if (dims == 3) {
738 free(dstImageOffsets);
739 }
740
741 radeon_teximage_unmap(image);
742 }
743
744 /**
745 * All glTexImage calls go through this function.
746 */
/**
 * All glTexImage calls go through this function.
 * Sequence: flush GPU work touching this image's BO; adjust size for
 * convolution; enforce the 32-byte minimum pitch; free old storage; assign
 * a miptree (or fall back to local memory); validate/resolve the (possibly
 * PBO-backed) source pixels; store; release the PBO mapping.
 */
static void radeon_teximage(
	GLcontext *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLint postConvWidth = width;
	GLint postConvHeight = height;
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage, face, level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		/* Writing while the GPU still references the BO would race;
		 * flush pending rendering first. */
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling teximage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;

	if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
		_mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
						   &postConvHeight);
	}

	if (!_mesa_is_format_compressed(texImage->TexFormat)) {
		GLuint texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
		/* Minimum pitch of 32 bytes */
		if (postConvWidth * texelBytes < 32) {
			postConvWidth = 32 / texelBytes;
			texImage->RowStride = postConvWidth;
		}
		if (!image->mt) {
			assert(texImage->RowStride == postConvWidth);
		}
	}

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			int size = _mesa_format_image_size(texImage->TexFormat,
								texImage->Width,
								texImage->Height,
								texImage->Depth);
			/* NOTE(review): allocation result is not checked here;
			 * a NULL Data would surface later in the store path. */
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
					"%s %dd: texObj %p, texImage %p, "
					" no miptree assigned, using local memory %p\n",
					__func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			0, 0, 0,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
841
/** glTexImage1D entry point: 1D is a 1xW image, uncompressed. */
void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
			0, format, type, pixels, packing, texObj, texImage, 0);
}
853
/** glTexImage2D entry point: depth 1, uncompressed. */
void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint height, GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)

{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
			0, format, type, pixels, packing, texObj, texImage, 0);
}
866
/** glCompressedTexImage2D entry point: format/type are unused (0) on the
 * compressed path; unpack state comes from ctx->Unpack. */
void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
				GLint level, GLint internalFormat,
				GLint width, GLint height, GLint border,
				GLsizei imageSize, const GLvoid * data,
				struct gl_texture_object *texObj,
				struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
			imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
}
877
/** glTexImage3D entry point: full width/height/depth, uncompressed. */
void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint height, GLint depth,
		      GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
			0, format, type, pixels, packing, texObj, texImage, 0);
}
890
891 /**
892 * All glTexSubImage calls go through this function.
893 */
/**
 * All glTexSubImage calls go through this function.
 * Like radeon_teximage() but updates an existing subregion: flushes pending
 * GPU work touching the image's BO, invalidates the texture object, resolves
 * (possibly PBO-backed) source pixels, stores the subregion, and releases
 * the PBO mapping.  No storage is (re)allocated here.
 */
static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage,
			_mesa_tex_target_to_face(target), level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		/* Don't scribble on a BO the GPU may still be reading. */
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling texsubimage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			xoffset, yoffset, zoffset,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
947
/** glTexSubImage1D entry point. */
void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
			 GLint xoffset,
			 GLsizei width,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
		format, type, pixels, packing, texObj, texImage, 0);
}
960
/** glTexSubImage2D entry point. */
void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset,
			 GLsizei width, GLsizei height,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
			   0, format, type, pixels, packing, texObj, texImage,
			   0);
}
974
/** glCompressedTexSubImage2D entry point: type is unused (0) on the
 * compressed path; unpack state comes from ctx->Unpack. */
void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
				   GLint level, GLint xoffset,
				   GLint yoffset, GLsizei width,
				   GLsizei height, GLenum format,
				   GLsizei imageSize, const GLvoid * data,
				   struct gl_texture_object *texObj,
				   struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
		imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
}
986
987
/** glTexSubImage3D entry point. */
void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset, GLint zoffset,
			 GLsizei width, GLsizei height, GLsizei depth,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
		format, type, pixels, packing, texObj, texImage, 0);
}